code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "helpers/memenv/memenv.h" #include "leveldb/env.h" #include "leveldb/status.h" #include "port/port.h" #include "util/mutexlock.h" #include <map> #include <string.h> #include <string> #include <vector> namespace leveldb { namespace { class FileState { public: // FileStates are reference counted. The initial reference count is zero // and the caller must call Ref() at least once. FileState() : refs_(0), size_(0) {} // Increase the reference count. void Ref() { MutexLock lock(&refs_mutex_); ++refs_; } // Decrease the reference count. Delete if this is the last reference. void Unref() { bool do_delete = false; { MutexLock lock(&refs_mutex_); --refs_; assert(refs_ >= 0); if (refs_ <= 0) { do_delete = true; } } if (do_delete) { delete this; } } uint64_t Size() const { return size_; } Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const { if (offset > size_) { return Status::IOError("Offset greater than file size."); } const uint64_t available = size_ - offset; if (n > available) { n = available; } if (n == 0) { *result = Slice(); return Status::OK(); } size_t block = offset / kBlockSize; size_t block_offset = offset % kBlockSize; if (n <= kBlockSize - block_offset) { // The requested bytes are all in the first block. 
*result = Slice(blocks_[block] + block_offset, n); return Status::OK(); } size_t bytes_to_copy = n; char* dst = scratch; while (bytes_to_copy > 0) { size_t avail = kBlockSize - block_offset; if (avail > bytes_to_copy) { avail = bytes_to_copy; } memcpy(dst, blocks_[block] + block_offset, avail); bytes_to_copy -= avail; dst += avail; block++; block_offset = 0; } *result = Slice(scratch, n); return Status::OK(); } Status Append(const Slice& data) { const char* src = data.data(); size_t src_len = data.size(); while (src_len > 0) { size_t avail; size_t offset = size_ % kBlockSize; if (offset != 0) { // There is some room in the last block. avail = kBlockSize - offset; } else { // No room in the last block; push new one. blocks_.push_back(new char[kBlockSize]); avail = kBlockSize; } if (avail > src_len) { avail = src_len; } memcpy(blocks_.back() + offset, src, avail); src_len -= avail; src += avail; size_ += avail; } return Status::OK(); } private: // Private since only Unref() should be used to delete it. ~FileState() { for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end(); ++i) { delete [] *i; } } // No copying allowed. FileState(const FileState&); void operator=(const FileState&); port::Mutex refs_mutex_; int refs_; // Protected by refs_mutex_; // The following fields are not protected by any mutex. They are only mutable // while the file is being written, and concurrent access is not allowed // to writable files. 
std::vector<char*> blocks_; uint64_t size_; enum { kBlockSize = 8 * 1024 }; }; class SequentialFileImpl : public SequentialFile { public: explicit SequentialFileImpl(FileState* file) : file_(file), pos_(0) { file_->Ref(); } ~SequentialFileImpl() { file_->Unref(); } virtual Status Read(size_t n, Slice* result, char* scratch) { Status s = file_->Read(pos_, n, result, scratch); if (s.ok()) { pos_ += result->size(); } return s; } virtual Status Skip(uint64_t n) { if (pos_ > file_->Size()) { return Status::IOError("pos_ > file_->Size()"); } const size_t available = file_->Size() - pos_; if (n > available) { n = available; } pos_ += n; return Status::OK(); } private: FileState* file_; size_t pos_; }; class RandomAccessFileImpl : public RandomAccessFile { public: explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); } ~RandomAccessFileImpl() { file_->Unref(); } virtual Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const { return file_->Read(offset, n, result, scratch); } private: FileState* file_; }; class WritableFileImpl : public WritableFile { public: WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); } ~WritableFileImpl() { file_->Unref(); } virtual Status Append(const Slice& data) { return file_->Append(data); } virtual Status Close() { return Status::OK(); } virtual Status Flush() { return Status::OK(); } virtual Status Sync() { return Status::OK(); } private: FileState* file_; }; class InMemoryEnv : public EnvWrapper { public: explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { } virtual ~InMemoryEnv() { for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){ i->second->Unref(); } } // Partial implementation of the Env interface. 
virtual Status NewSequentialFile(const std::string& fname, SequentialFile** result) { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { *result = NULL; return Status::IOError(fname, "File not found"); } *result = new SequentialFileImpl(file_map_[fname]); return Status::OK(); } virtual Status NewRandomAccessFile(const std::string& fname, RandomAccessFile** result) { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { *result = NULL; return Status::IOError(fname, "File not found"); } *result = new RandomAccessFileImpl(file_map_[fname]); return Status::OK(); } virtual Status NewWritableFile(const std::string& fname, WritableFile** result) { MutexLock lock(&mutex_); if (file_map_.find(fname) != file_map_.end()) { DeleteFileInternal(fname); } FileState* file = new FileState(); file->Ref(); file_map_[fname] = file; *result = new WritableFileImpl(file); return Status::OK(); } virtual bool FileExists(const std::string& fname) { MutexLock lock(&mutex_); return file_map_.find(fname) != file_map_.end(); } virtual Status GetChildren(const std::string& dir, std::vector<std::string>* result) { MutexLock lock(&mutex_); result->clear(); for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){ const std::string& filename = i->first; if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' && Slice(filename).starts_with(Slice(dir))) { result->push_back(filename.substr(dir.size() + 1)); } } return Status::OK(); } void DeleteFileInternal(const std::string& fname) { if (file_map_.find(fname) == file_map_.end()) { return; } file_map_[fname]->Unref(); file_map_.erase(fname); } virtual Status DeleteFile(const std::string& fname) { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); } DeleteFileInternal(fname); return Status::OK(); } virtual Status CreateDir(const std::string& dirname) { return Status::OK(); } virtual Status DeleteDir(const 
std::string& dirname) { return Status::OK(); } virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); } *file_size = file_map_[fname]->Size(); return Status::OK(); } virtual Status RenameFile(const std::string& src, const std::string& target) { MutexLock lock(&mutex_); if (file_map_.find(src) == file_map_.end()) { return Status::IOError(src, "File not found"); } DeleteFileInternal(target); file_map_[target] = file_map_[src]; file_map_.erase(src); return Status::OK(); } virtual Status LockFile(const std::string& fname, FileLock** lock) { *lock = new FileLock; return Status::OK(); } virtual Status UnlockFile(FileLock* lock) { delete lock; return Status::OK(); } virtual Status GetTestDirectory(std::string* path) { *path = "/test"; return Status::OK(); } private: // Map from filenames to FileState objects, representing a simple file system. typedef std::map<std::string, FileState*> FileSystem; port::Mutex mutex_; FileSystem file_map_; // Protected by mutex_. }; } Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); } }
zzxiaogx-leveldb
helpers/memenv/memenv.cc
C++
bsd
9,122
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_ #define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_ namespace leveldb { class Env; // Returns a new environment that stores its data in memory and delegates // all non-file-storage tasks to base_env. The caller must delete the result // when it is no longer needed. // *base_env must remain live while the result is in use. Env* NewMemEnv(Env* base_env); } #endif // STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
zzxiaogx-leveldb
helpers/memenv/memenv.h
C++
bsd
677
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "helpers/memenv/memenv.h" #include "db/db_impl.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "util/testharness.h" #include <string> #include <vector> namespace leveldb { class MemEnvTest { public: Env* env_; MemEnvTest() : env_(NewMemEnv(Env::Default())) { } ~MemEnvTest() { delete env_; } }; TEST(MemEnvTest, Basics) { uint64_t file_size; WritableFile* writable_file; std::vector<std::string> children; ASSERT_OK(env_->CreateDir("/dir")); // Check that the directory is empty. ASSERT_TRUE(!env_->FileExists("/dir/non_existent")); ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok()); ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); // Create a file. ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); delete writable_file; // Check that the file exists. ASSERT_TRUE(env_->FileExists("/dir/f")); ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(0, file_size); ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(1, children.size()); ASSERT_EQ("f", children[0]); // Write to the file. ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(writable_file->Append("abc")); delete writable_file; // Check for expected size. ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(3, file_size); // Check that renaming works. ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok()); ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/f")); ASSERT_TRUE(env_->FileExists("/dir/g")); ASSERT_OK(env_->GetFileSize("/dir/g", &file_size)); ASSERT_EQ(3, file_size); // Check that opening non-existent file fails. 
SequentialFile* seq_file; RandomAccessFile* rand_file; ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok()); ASSERT_TRUE(!seq_file); ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok()); ASSERT_TRUE(!rand_file); // Check that deleting works. ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok()); ASSERT_OK(env_->DeleteFile("/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/g")); ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); ASSERT_OK(env_->DeleteDir("/dir")); } TEST(MemEnvTest, ReadWrite) { WritableFile* writable_file; SequentialFile* seq_file; RandomAccessFile* rand_file; Slice result; char scratch[100]; ASSERT_OK(env_->CreateDir("/dir")); ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(writable_file->Append("hello ")); ASSERT_OK(writable_file->Append("world")); delete writable_file; // Read sequentially. ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); ASSERT_OK(seq_file->Skip(1)); ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF. ASSERT_EQ(0, result.size()); ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file. ASSERT_OK(seq_file->Read(1000, &result, scratch)); ASSERT_EQ(0, result.size()); delete seq_file; // Random reads. ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file)); ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". ASSERT_EQ(0, result.compare("d")); // Too high offset. 
ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok()); delete rand_file; } TEST(MemEnvTest, Locks) { FileLock* lock; // These are no-ops, but we test they return success. ASSERT_OK(env_->LockFile("some file", &lock)); ASSERT_OK(env_->UnlockFile(lock)); } TEST(MemEnvTest, Misc) { std::string test_dir; ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_TRUE(!test_dir.empty()); WritableFile* writable_file; ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file)); // These are no-ops, but we test they return success. ASSERT_OK(writable_file->Sync()); ASSERT_OK(writable_file->Flush()); ASSERT_OK(writable_file->Close()); delete writable_file; } TEST(MemEnvTest, LargeWrite) { const size_t kWriteSize = 300 * 1024; char* scratch = new char[kWriteSize * 2]; std::string write_data; for (size_t i = 0; i < kWriteSize; ++i) { write_data.append(1, static_cast<char>(i)); } WritableFile* writable_file; ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(writable_file->Append("foo")); ASSERT_OK(writable_file->Append(write_data)); delete writable_file; SequentialFile* seq_file; Slice result; ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo". 
ASSERT_EQ(0, result.compare("foo")); size_t read = 0; std::string read_data; while (read < kWriteSize) { ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch)); read_data.append(result.data(), result.size()); read += result.size(); } ASSERT_TRUE(write_data == read_data); delete seq_file; delete [] scratch; } TEST(MemEnvTest, DBTest) { Options options; options.create_if_missing = true; options.env = env_; DB* db; const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")}; const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")}; ASSERT_OK(DB::Open(options, "/dir/db", &db)); for (size_t i = 0; i < 3; ++i) { ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i])); } for (size_t i = 0; i < 3; ++i) { std::string res; ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_TRUE(res == vals[i]); } Iterator* iterator = db->NewIterator(ReadOptions()); iterator->SeekToFirst(); for (size_t i = 0; i < 3; ++i) { ASSERT_TRUE(iterator->Valid()); ASSERT_TRUE(keys[i] == iterator->key()); ASSERT_TRUE(vals[i] == iterator->value()); iterator->Next(); } ASSERT_TRUE(!iterator->Valid()); delete iterator; DBImpl* dbi = reinterpret_cast<DBImpl*>(db); ASSERT_OK(dbi->TEST_CompactMemTable()); for (size_t i = 0; i < 3; ++i) { std::string res; ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_TRUE(res == vals[i]); } delete db; } } int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
zzxiaogx-leveldb
helpers/memenv/memenv_test.cc
C++
bsd
6,846
# Copyright (c) 2011 The LevelDB Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. See the AUTHORS file for names of contributors. CC = g++ #----------------------------------------------- # Uncomment exactly one of the lines labelled (A), (B), and (C) below # to switch between compilation modes. OPT = -O2 -DNDEBUG # (A) Production use (optimized mode) # OPT = -g2 # (B) Debug mode, w/ full line-level debugging symbols # OPT = -O2 -g2 -DNDEBUG # (C) Profiling mode: opt, but w/debugging symbols #----------------------------------------------- # detect what platform we're building on $(shell sh ./build_detect_platform) # this file is generated by build_detect_platform to set build flags include build_config.mk # If Snappy is installed, add compilation and linker flags # (see http://code.google.com/p/snappy/) ifeq ($(SNAPPY), 1) SNAPPY_CFLAGS=-DSNAPPY SNAPPY_LDFLAGS=-lsnappy else SNAPPY_CFLAGS= SNAPPY_LDFLAGS= endif # If Google Perf Tools are installed, add compilation and linker flags # (see http://code.google.com/p/google-perftools/) ifeq ($(GOOGLE_PERFTOOLS), 1) GOOGLE_PERFTOOLS_LDFLAGS=-ltcmalloc else GOOGLE_PERFTOOLS_LDFLAGS= endif CFLAGS = -c -I. 
-I./include $(PORT_CFLAGS) $(PLATFORM_CFLAGS) $(OPT) $(SNAPPY_CFLAGS) LDFLAGS=$(PLATFORM_LDFLAGS) $(SNAPPY_LDFLAGS) $(GOOGLE_PERFTOOLS_LDFLAGS) LIBOBJECTS = \ ./db/builder.o \ ./db/c.o \ ./db/db_impl.o \ ./db/db_iter.o \ ./db/filename.o \ ./db/dbformat.o \ ./db/log_reader.o \ ./db/log_writer.o \ ./db/memtable.o \ ./db/repair.o \ ./db/table_cache.o \ ./db/version_edit.o \ ./db/version_set.o \ ./db/write_batch.o \ ./port/port_posix.o \ ./table/block.o \ ./table/block_builder.o \ ./table/format.o \ ./table/iterator.o \ ./table/merger.o \ ./table/table.o \ ./table/table_builder.o \ ./table/two_level_iterator.o \ ./util/arena.o \ ./util/cache.o \ ./util/coding.o \ ./util/comparator.o \ ./util/crc32c.o \ ./util/env.o \ ./util/env_posix.o \ ./util/hash.o \ ./util/histogram.o \ ./util/logging.o \ ./util/options.o \ ./util/status.o TESTUTIL = ./util/testutil.o TESTHARNESS = ./util/testharness.o $(TESTUTIL) TESTS = \ arena_test \ c_test \ cache_test \ coding_test \ corruption_test \ crc32c_test \ db_test \ dbformat_test \ env_test \ filename_test \ log_test \ memenv_test \ skiplist_test \ table_test \ version_edit_test \ version_set_test \ write_batch_test PROGRAMS = db_bench $(TESTS) BENCHMARKS = db_bench_sqlite3 db_bench_tree_db LIBRARY = libleveldb.a MEMENVLIBRARY = libmemenv.a all: $(LIBRARY) check: $(PROGRAMS) $(TESTS) for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done clean: -rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(MEMENVLIBRARY) */*.o */*/*.o ios-x86/*/*.o ios-arm/*/*.o -rm -rf ios-x86/* ios-arm/* -rm build_config.mk $(LIBRARY): $(LIBOBJECTS) rm -f $@ $(AR) -rs $@ $(LIBOBJECTS) db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) $(CC) $(LDFLAGS) db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) -o $@ db_bench_sqlite3: doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) $(CC) $(LDFLAGS) -lsqlite3 doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) -o $@ db_bench_tree_db: doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) $(CC) $(LDFLAGS) 
-lkyotocabinet doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) -o $@ arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/version_set_test.o 
$(LIBOBJECTS) $(TESTHARNESS) -o $@ write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) $(CC) $(LDFLAGS) db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(MEMENVLIBRARY) : helpers/memenv/memenv.o rm -f $@ $(AR) -rs $@ helpers/memenv/memenv.o memenv_test : helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) $(CC) $(LDFLAGS) helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) -o $@ ifeq ($(PLATFORM), IOS) # For iOS, create universal object files to be used on both the simulator and # a device. SIMULATORROOT=/Developer/Platforms/iPhoneSimulator.platform/Developer DEVICEROOT=/Developer/Platforms/iPhoneOS.platform/Developer IOSVERSION=$(shell defaults read /Developer/Platforms/iPhoneOS.platform/version CFBundleShortVersionString) .cc.o: mkdir -p ios-x86/$(dir $@) $(SIMULATORROOT)/usr/bin/$(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 $< -o ios-x86/$@ mkdir -p ios-arm/$(dir $@) $(DEVICEROOT)/usr/bin/$(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 $< -o ios-arm/$@ lipo ios-x86/$@ ios-arm/$@ -create -output $@ .c.o: mkdir -p ios-x86/$(dir $@) $(SIMULATORROOT)/usr/bin/$(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 $< -o ios-x86/$@ mkdir -p ios-arm/$(dir $@) $(DEVICEROOT)/usr/bin/$(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 $< -o ios-arm/$@ lipo ios-x86/$@ ios-arm/$@ -create -output $@ else .cc.o: $(CC) $(CFLAGS) $< -o $@ .c.o: $(CC) $(CFLAGS) $< -o $@ endif
zzxiaogx-leveldb
Makefile
Makefile
bsd
7,012
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <string.h> #include "util/coding.h" #include "util/hash.h" namespace leveldb { uint32_t Hash(const char* data, size_t n, uint32_t seed) { // Similar to murmur hash const uint32_t m = 0xc6a4a793; const uint32_t r = 24; const char* limit = data + n; uint32_t h = seed ^ (n * m); // Pick up four bytes at a time while (data + 4 <= limit) { uint32_t w = DecodeFixed32(data); data += 4; h += w; h *= m; h ^= (h >> 16); } // Pick up remaining bytes switch (limit - data) { case 3: h += data[2] << 16; // fall through case 2: h += data[1] << 8; // fall through case 1: h += data[0]; h *= m; h ^= (h >> r); break; } return h; } }
zzxiaogx-leveldb
util/hash.cc
C++
bsd
958
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/coding.h" namespace leveldb { void EncodeFixed32(char* buf, uint32_t value) { #if __BYTE_ORDER == __LITTLE_ENDIAN memcpy(buf, &value, sizeof(value)); #else buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; buf[2] = (value >> 16) & 0xff; buf[3] = (value >> 24) & 0xff; #endif } void EncodeFixed64(char* buf, uint64_t value) { #if __BYTE_ORDER == __LITTLE_ENDIAN memcpy(buf, &value, sizeof(value)); #else buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; buf[2] = (value >> 16) & 0xff; buf[3] = (value >> 24) & 0xff; buf[4] = (value >> 32) & 0xff; buf[5] = (value >> 40) & 0xff; buf[6] = (value >> 48) & 0xff; buf[7] = (value >> 56) & 0xff; #endif } void PutFixed32(std::string* dst, uint32_t value) { char buf[sizeof(value)]; EncodeFixed32(buf, value); dst->append(buf, sizeof(buf)); } void PutFixed64(std::string* dst, uint64_t value) { char buf[sizeof(value)]; EncodeFixed64(buf, value); dst->append(buf, sizeof(buf)); } char* EncodeVarint32(char* dst, uint32_t v) { // Operate on characters as unsigneds unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); static const int B = 128; if (v < (1<<7)) { *(ptr++) = v; } else if (v < (1<<14)) { *(ptr++) = v | B; *(ptr++) = v>>7; } else if (v < (1<<21)) { *(ptr++) = v | B; *(ptr++) = (v>>7) | B; *(ptr++) = v>>14; } else if (v < (1<<28)) { *(ptr++) = v | B; *(ptr++) = (v>>7) | B; *(ptr++) = (v>>14) | B; *(ptr++) = v>>21; } else { *(ptr++) = v | B; *(ptr++) = (v>>7) | B; *(ptr++) = (v>>14) | B; *(ptr++) = (v>>21) | B; *(ptr++) = v>>28; } return reinterpret_cast<char*>(ptr); } void PutVarint32(std::string* dst, uint32_t v) { char buf[5]; char* ptr = EncodeVarint32(buf, v); dst->append(buf, ptr - buf); } char* EncodeVarint64(char* dst, uint64_t v) { static const int B = 128; unsigned char* ptr = 
reinterpret_cast<unsigned char*>(dst); while (v >= B) { *(ptr++) = (v & (B-1)) | B; v >>= 7; } *(ptr++) = static_cast<unsigned char>(v); return reinterpret_cast<char*>(ptr); } void PutVarint64(std::string* dst, uint64_t v) { char buf[10]; char* ptr = EncodeVarint64(buf, v); dst->append(buf, ptr - buf); } void PutLengthPrefixedSlice(std::string* dst, const Slice& value) { PutVarint32(dst, value.size()); dst->append(value.data(), value.size()); } int VarintLength(uint64_t v) { int len = 1; while (v >= 128) { v >>= 7; len++; } return len; } const char* GetVarint32PtrFallback(const char* p, const char* limit, uint32_t* value) { uint32_t result = 0; for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) { uint32_t byte = *(reinterpret_cast<const unsigned char*>(p)); p++; if (byte & 128) { // More bytes are present result |= ((byte & 127) << shift); } else { result |= (byte << shift); *value = result; return reinterpret_cast<const char*>(p); } } return NULL; } bool GetVarint32(Slice* input, uint32_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint32Ptr(p, limit, value); if (q == NULL) { return false; } else { *input = Slice(q, limit - q); return true; } } const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) { uint64_t result = 0; for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) { uint64_t byte = *(reinterpret_cast<const unsigned char*>(p)); p++; if (byte & 128) { // More bytes are present result |= ((byte & 127) << shift); } else { result |= (byte << shift); *value = result; return reinterpret_cast<const char*>(p); } } return NULL; } bool GetVarint64(Slice* input, uint64_t* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint64Ptr(p, limit, value); if (q == NULL) { return false; } else { *input = Slice(q, limit - q); return true; } } const char* GetLengthPrefixedSlice(const char* p, const char* limit, Slice* result) { 
uint32_t len; p = GetVarint32Ptr(p, limit, &len); if (p == NULL) return NULL; if (p + len > limit) return NULL; *result = Slice(p, len); return p + len; } bool GetLengthPrefixedSlice(Slice* input, Slice* result) { uint32_t len; if (GetVarint32(input, &len) && input->size() >= len) { *result = Slice(input->data(), len); input->remove_prefix(len); return true; } else { return false; } } }
zzxiaogx-leveldb
util/coding.cc
C++
bsd
4,887
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/env.h" #include "port/port.h" #include "util/testharness.h" namespace leveldb { static const int kDelayMicros = 100000; class EnvPosixTest { private: port::Mutex mu_; std::string events_; public: Env* env_; EnvPosixTest() : env_(Env::Default()) { } }; static void SetBool(void* ptr) { *(reinterpret_cast<bool*>(ptr)) = true; } TEST(EnvPosixTest, RunImmediately) { bool called = false; env_->Schedule(&SetBool, &called); Env::Default()->SleepForMicroseconds(kDelayMicros); ASSERT_TRUE(called); } TEST(EnvPosixTest, RunMany) { int last_id = 0; struct CB { int* last_id_ptr; // Pointer to shared slot int id; // Order# for the execution of this callback CB(int* p, int i) : last_id_ptr(p), id(i) { } static void Run(void* v) { CB* cb = reinterpret_cast<CB*>(v); ASSERT_EQ(cb->id-1, *cb->last_id_ptr); *cb->last_id_ptr = cb->id; } }; // Schedule in different order than start time CB cb1(&last_id, 1); CB cb2(&last_id, 2); CB cb3(&last_id, 3); CB cb4(&last_id, 4); env_->Schedule(&CB::Run, &cb1); env_->Schedule(&CB::Run, &cb2); env_->Schedule(&CB::Run, &cb3); env_->Schedule(&CB::Run, &cb4); Env::Default()->SleepForMicroseconds(kDelayMicros); ASSERT_EQ(4, last_id); } struct State { port::Mutex mu; int val; int num_running; }; static void ThreadBody(void* arg) { State* s = reinterpret_cast<State*>(arg); s->mu.Lock(); s->val += 1; s->num_running -= 1; s->mu.Unlock(); } TEST(EnvPosixTest, StartThread) { State state; state.val = 0; state.num_running = 3; for (int i = 0; i < 3; i++) { env_->StartThread(&ThreadBody, &state); } while (true) { state.mu.Lock(); int num = state.num_running; state.mu.Unlock(); if (num == 0) { break; } Env::Default()->SleepForMicroseconds(kDelayMicros); } ASSERT_EQ(state.val, 3); } } int main(int argc, char** argv) { return 
leveldb::test::RunAllTests(); }
zzxiaogx-leveldb
util/env_test.cc
C++
bsd
2,209
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_ #define STORAGE_LEVELDB_UTIL_RANDOM_H_ #include <stdint.h> namespace leveldb { // A very simple random number generator. Not especially good at // generating truly random bits, but good enough for our needs in this // package. class Random { private: uint32_t seed_; public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { } uint32_t Next() { static const uint32_t M = 2147483647L; // 2^31-1 static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 // We are computing // seed_ = (seed_ * A) % M, where M = 2^31-1 // // seed_ must not be zero or M, or else all subsequent computed values // will be zero or M respectively. For all other values, seed_ will end // up cycling through every number in [1,M-1] uint64_t product = seed_ * A; // Compute (product % M) using the fact that ((x << 31) % M) == x. seed_ = static_cast<uint32_t>((product >> 31) + (product & M)); // The first reduction may overflow by 1 bit, so we may need to // repeat. mod == M is not possible; using > allows the faster // sign-bit-based test. if (seed_ > M) { seed_ -= M; } return seed_; } // Returns a uniformly distributed value in the range [0..n-1] // REQUIRES: n > 0 uint32_t Uniform(int n) { return Next() % n; } // Randomly returns true ~"1/n" of the time, and false otherwise. // REQUIRES: n > 0 bool OneIn(int n) { return (Next() % n) == 0; } // Skewed: pick "base" uniformly from range [0,max_log] and then // return "base" random bits. The effect is to pick a number in the // range [0,2^max_log-1] with exponential bias towards smaller numbers. uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); } }; } #endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
zzxiaogx-leveldb
util/random.h
C++
bsd
2,057
// Unit tests for the LRU cache (leveldb/cache.h): hit/miss lookup, erase,
// pinned-entry semantics (entries held via a Handle survive eviction until
// released), the eviction policy for frequently-used entries, weighted
// ("heavy") entries respecting total capacity, and NewId() uniqueness.
// CacheTest::current_ is a static back-pointer so the static Deleter
// callback can record deletions into the running test's vectors.
// Keys/values are ints encoded via PutFixed32 / reinterpret_cast.
// Corpus content below stored flattened; code left byte-identical.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/cache.h" #include <vector> #include "util/coding.h" #include "util/testharness.h" namespace leveldb { // Conversions between numeric keys/values and the types expected by Cache. static std::string EncodeKey(int k) { std::string result; PutFixed32(&result, k); return result; } static int DecodeKey(const Slice& k) { assert(k.size() == 4); return DecodeFixed32(k.data()); } static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); } static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); } class CacheTest { public: static CacheTest* current_; static void Deleter(const Slice& key, void* v) { current_->deleted_keys_.push_back(DecodeKey(key)); current_->deleted_values_.push_back(DecodeValue(v)); } static const int kCacheSize = 1000; std::vector<int> deleted_keys_; std::vector<int> deleted_values_; Cache* cache_; CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; } ~CacheTest() { delete cache_; } int Lookup(int key) { Cache::Handle* handle = cache_->Lookup(EncodeKey(key)); const int r = (handle == NULL) ? 
-1 : DecodeValue(cache_->Value(handle)); if (handle != NULL) { cache_->Release(handle); } return r; } void Insert(int key, int value, int charge = 1) { cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge, &CacheTest::Deleter)); } void Erase(int key) { cache_->Erase(EncodeKey(key)); } }; CacheTest* CacheTest::current_; TEST(CacheTest, HitAndMiss) { ASSERT_EQ(-1, Lookup(100)); Insert(100, 101); ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(-1, Lookup(200)); ASSERT_EQ(-1, Lookup(300)); Insert(200, 201); ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(-1, Lookup(300)); Insert(100, 102); ASSERT_EQ(102, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(-1, Lookup(300)); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); } TEST(CacheTest, Erase) { Erase(200); ASSERT_EQ(0, deleted_keys_.size()); Insert(100, 101); Insert(200, 201); Erase(100); ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); Erase(100); ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(1, deleted_keys_.size()); } TEST(CacheTest, EntriesArePinned) { Insert(100, 101); Cache::Handle* h1 = cache_->Lookup(EncodeKey(100)); ASSERT_EQ(101, DecodeValue(cache_->Value(h1))); Insert(100, 102); Cache::Handle* h2 = cache_->Lookup(EncodeKey(100)); ASSERT_EQ(102, DecodeValue(cache_->Value(h2))); ASSERT_EQ(0, deleted_keys_.size()); cache_->Release(h1); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); Erase(100); ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(1, deleted_keys_.size()); cache_->Release(h2); ASSERT_EQ(2, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[1]); ASSERT_EQ(102, deleted_values_[1]); } TEST(CacheTest, EvictionPolicy) { Insert(100, 101); Insert(200, 201); // Frequently used entry must be kept around for (int i = 0; i < kCacheSize + 
100; i++) { Insert(1000+i, 2000+i); ASSERT_EQ(2000+i, Lookup(1000+i)); ASSERT_EQ(101, Lookup(100)); } ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(-1, Lookup(200)); } TEST(CacheTest, HeavyEntries) { // Add a bunch of light and heavy entries and then count the combined // size of items still in the cache, which must be approximately the // same as the total capacity. const int kLight = 1; const int kHeavy = 10; int added = 0; int index = 0; while (added < 2*kCacheSize) { const int weight = (index & 1) ? kLight : kHeavy; Insert(index, 1000+index, weight); added += weight; index++; } int cached_weight = 0; for (int i = 0; i < index; i++) { const int weight = (i & 1 ? kLight : kHeavy); int r = Lookup(i); if (r >= 0) { cached_weight += weight; ASSERT_EQ(1000+i, r); } } ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10); } TEST(CacheTest, NewId) { uint64_t a = cache_->NewId(); uint64_t b = cache_->NewId(); ASSERT_NE(a, b); } } int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
zzxiaogx-leveldb
util/cache_test.cc
C++
bsd
4,665
// POSIX implementation of the leveldb Env interface.
// - PosixSequentialFile / PosixRandomAccessFile: thin wrappers over
//   FILE* fread_unlocked/fseek and fd-based pread respectively.
// - PosixMmapFile: WritableFile that appends via mmap'ed regions; it
//   ftruncate-extends the file, maps map_size_ bytes (doubling up to 1MB
//   per region), memcpy's appends into the mapping, and trims the unused
//   tail on Close().  Sync() fdatasync's any unmapped-but-unsynced data
//   (pending_sync_) then msync's the dirty page range of the live mapping.
// - File locking uses fcntl(F_SETLK) over the whole file.
// - PosixEnv: directory/file ops mapped onto unlink/mkdir/rmdir/stat/
//   rename/open; Schedule() runs callbacks on a single lazily-started
//   background pthread draining a deque protected by mu_/bgsignal_.
// NOTE(review): GetChildren returns every dirent name, which on most
// systems includes "." and ".." — callers appear to tolerate this; confirm.
// Logic is order-sensitive (mmap bookkeeping, lock/signal ordering), so
// code is left byte-identical; corpus content below stored flattened.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <deque> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <time.h> #include <unistd.h> #if defined(LEVELDB_PLATFORM_ANDROID) #include <sys/stat.h> #endif #include "leveldb/env.h" #include "leveldb/slice.h" #include "port/port.h" #include "util/logging.h" #include "util/posix_logger.h" namespace leveldb { namespace { static Status IOError(const std::string& context, int err_number) { return Status::IOError(context, strerror(err_number)); } class PosixSequentialFile: public SequentialFile { private: std::string filename_; FILE* file_; public: PosixSequentialFile(const std::string& fname, FILE* f) : filename_(fname), file_(f) { } virtual ~PosixSequentialFile() { fclose(file_); } virtual Status Read(size_t n, Slice* result, char* scratch) { Status s; size_t r = fread_unlocked(scratch, 1, n, file_); *result = Slice(scratch, r); if (r < n) { if (feof(file_)) { // We leave status as ok if we hit the end of the file } else { // A partial read with an error: return a non-ok status s = IOError(filename_, errno); } } return s; } virtual Status Skip(uint64_t n) { if (fseek(file_, n, SEEK_CUR)) { return IOError(filename_, errno); } return Status::OK(); } }; class PosixRandomAccessFile: public RandomAccessFile { private: std::string filename_; int fd_; public: PosixRandomAccessFile(const std::string& fname, int fd) : filename_(fname), fd_(fd) { } virtual ~PosixRandomAccessFile() { close(fd_); } virtual Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const { Status s; ssize_t r = pread(fd_, scratch, n, static_cast<off_t>(offset)); *result = Slice(scratch, 
(r < 0) ? 0 : r); if (r < 0) { // An error: return a non-ok status s = IOError(filename_, errno); } return s; } }; // We preallocate up to an extra megabyte and use memcpy to append new // data to the file. This is safe since we either properly close the // file before reading from it, or for log files, the reading code // knows enough to skip zero suffixes. class PosixMmapFile : public WritableFile { private: std::string filename_; int fd_; size_t page_size_; size_t map_size_; // How much extra memory to map at a time char* base_; // The mapped region char* limit_; // Limit of the mapped region char* dst_; // Where to write next (in range [base_,limit_]) char* last_sync_; // Where have we synced up to uint64_t file_offset_; // Offset of base_ in file // Have we done an munmap of unsynced data? bool pending_sync_; // Roundup x to a multiple of y static size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; } size_t TruncateToPageBoundary(size_t s) { s -= (s & (page_size_ - 1)); assert((s % page_size_) == 0); return s; } bool UnmapCurrentRegion() { bool result = true; if (base_ != NULL) { if (last_sync_ < limit_) { // Defer syncing this data until next Sync() call, if any pending_sync_ = true; } if (munmap(base_, limit_ - base_) != 0) { result = false; } file_offset_ += limit_ - base_; base_ = NULL; limit_ = NULL; last_sync_ = NULL; dst_ = NULL; // Increase the amount we map the next time, but capped at 1MB if (map_size_ < (1<<20)) { map_size_ *= 2; } } return result; } bool MapNewRegion() { assert(base_ == NULL); if (ftruncate(fd_, file_offset_ + map_size_) < 0) { return false; } void* ptr = mmap(NULL, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, file_offset_); if (ptr == MAP_FAILED) { return false; } base_ = reinterpret_cast<char*>(ptr); limit_ = base_ + map_size_; dst_ = base_; last_sync_ = base_; return true; } public: PosixMmapFile(const std::string& fname, int fd, size_t page_size) : filename_(fname), fd_(fd), page_size_(page_size), 
map_size_(Roundup(65536, page_size)), base_(NULL), limit_(NULL), dst_(NULL), last_sync_(NULL), file_offset_(0), pending_sync_(false) { assert((page_size & (page_size - 1)) == 0); } ~PosixMmapFile() { if (fd_ >= 0) { PosixMmapFile::Close(); } } virtual Status Append(const Slice& data) { const char* src = data.data(); size_t left = data.size(); while (left > 0) { assert(base_ <= dst_); assert(dst_ <= limit_); size_t avail = limit_ - dst_; if (avail == 0) { if (!UnmapCurrentRegion() || !MapNewRegion()) { return IOError(filename_, errno); } } size_t n = (left <= avail) ? left : avail; memcpy(dst_, src, n); dst_ += n; src += n; left -= n; } return Status::OK(); } virtual Status Close() { Status s; size_t unused = limit_ - dst_; if (!UnmapCurrentRegion()) { s = IOError(filename_, errno); } else if (unused > 0) { // Trim the extra space at the end of the file if (ftruncate(fd_, file_offset_ - unused) < 0) { s = IOError(filename_, errno); } } if (close(fd_) < 0) { if (s.ok()) { s = IOError(filename_, errno); } } fd_ = -1; base_ = NULL; limit_ = NULL; return s; } virtual Status Flush() { return Status::OK(); } virtual Status Sync() { Status s; if (pending_sync_) { // Some unmapped data was not synced pending_sync_ = false; if (fdatasync(fd_) < 0) { s = IOError(filename_, errno); } } if (dst_ > last_sync_) { // Find the beginnings of the pages that contain the first and last // bytes to be synced. size_t p1 = TruncateToPageBoundary(last_sync_ - base_); size_t p2 = TruncateToPageBoundary(dst_ - base_ - 1); last_sync_ = dst_; if (msync(base_ + p1, p2 - p1 + page_size_, MS_SYNC) < 0) { s = IOError(filename_, errno); } } return s; } }; static int LockOrUnlock(int fd, bool lock) { errno = 0; struct flock f; memset(&f, 0, sizeof(f)); f.l_type = (lock ? 
F_WRLCK : F_UNLCK); f.l_whence = SEEK_SET; f.l_start = 0; f.l_len = 0; // Lock/unlock entire file return fcntl(fd, F_SETLK, &f); } class PosixFileLock : public FileLock { public: int fd_; }; class PosixEnv : public Env { public: PosixEnv(); virtual ~PosixEnv() { fprintf(stderr, "Destroying Env::Default()\n"); exit(1); } virtual Status NewSequentialFile(const std::string& fname, SequentialFile** result) { FILE* f = fopen(fname.c_str(), "r"); if (f == NULL) { *result = NULL; return IOError(fname, errno); } else { *result = new PosixSequentialFile(fname, f); return Status::OK(); } } virtual Status NewRandomAccessFile(const std::string& fname, RandomAccessFile** result) { int fd = open(fname.c_str(), O_RDONLY); if (fd < 0) { *result = NULL; return IOError(fname, errno); } *result = new PosixRandomAccessFile(fname, fd); return Status::OK(); } virtual Status NewWritableFile(const std::string& fname, WritableFile** result) { Status s; const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644); if (fd < 0) { *result = NULL; s = IOError(fname, errno); } else { *result = new PosixMmapFile(fname, fd, page_size_); } return s; } virtual bool FileExists(const std::string& fname) { return access(fname.c_str(), F_OK) == 0; } virtual Status GetChildren(const std::string& dir, std::vector<std::string>* result) { result->clear(); DIR* d = opendir(dir.c_str()); if (d == NULL) { return IOError(dir, errno); } struct dirent* entry; while ((entry = readdir(d)) != NULL) { result->push_back(entry->d_name); } closedir(d); return Status::OK(); } virtual Status DeleteFile(const std::string& fname) { Status result; if (unlink(fname.c_str()) != 0) { result = IOError(fname, errno); } return result; }; virtual Status CreateDir(const std::string& name) { Status result; if (mkdir(name.c_str(), 0755) != 0) { result = IOError(name, errno); } return result; }; virtual Status DeleteDir(const std::string& name) { Status result; if (rmdir(name.c_str()) != 0) { result = IOError(name, errno); } 
return result; }; virtual Status GetFileSize(const std::string& fname, uint64_t* size) { Status s; struct stat sbuf; if (stat(fname.c_str(), &sbuf) != 0) { *size = 0; s = IOError(fname, errno); } else { *size = sbuf.st_size; } return s; } virtual Status RenameFile(const std::string& src, const std::string& target) { Status result; if (rename(src.c_str(), target.c_str()) != 0) { result = IOError(src, errno); } return result; } virtual Status LockFile(const std::string& fname, FileLock** lock) { *lock = NULL; Status result; int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644); if (fd < 0) { result = IOError(fname, errno); } else if (LockOrUnlock(fd, true) == -1) { result = IOError("lock " + fname, errno); close(fd); } else { PosixFileLock* my_lock = new PosixFileLock; my_lock->fd_ = fd; *lock = my_lock; } return result; } virtual Status UnlockFile(FileLock* lock) { PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock); Status result; if (LockOrUnlock(my_lock->fd_, false) == -1) { result = IOError("unlock", errno); } close(my_lock->fd_); delete my_lock; return result; } virtual void Schedule(void (*function)(void*), void* arg); virtual void StartThread(void (*function)(void* arg), void* arg); virtual Status GetTestDirectory(std::string* result) { const char* env = getenv("TEST_TMPDIR"); if (env && env[0] != '\0') { *result = env; } else { char buf[100]; snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid())); *result = buf; } // Directory may already exist CreateDir(*result); return Status::OK(); } static uint64_t gettid() { pthread_t tid = pthread_self(); uint64_t thread_id = 0; memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid))); return thread_id; } virtual Status NewLogger(const std::string& fname, Logger** result) { FILE* f = fopen(fname.c_str(), "w"); if (f == NULL) { *result = NULL; return IOError(fname, errno); } else { *result = new PosixLogger(f, &PosixEnv::gettid); return Status::OK(); } } virtual uint64_t NowMicros() { 
struct timeval tv; gettimeofday(&tv, NULL); return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec; } virtual void SleepForMicroseconds(int micros) { usleep(micros); } private: void PthreadCall(const char* label, int result) { if (result != 0) { fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); exit(1); } } // BGThread() is the body of the background thread void BGThread(); static void* BGThreadWrapper(void* arg) { reinterpret_cast<PosixEnv*>(arg)->BGThread(); return NULL; } size_t page_size_; pthread_mutex_t mu_; pthread_cond_t bgsignal_; pthread_t bgthread_; bool started_bgthread_; // Entry per Schedule() call struct BGItem { void* arg; void (*function)(void*); }; typedef std::deque<BGItem> BGQueue; BGQueue queue_; }; PosixEnv::PosixEnv() : page_size_(getpagesize()), started_bgthread_(false) { PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL)); PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL)); } void PosixEnv::Schedule(void (*function)(void*), void* arg) { PthreadCall("lock", pthread_mutex_lock(&mu_)); // Start background thread if necessary if (!started_bgthread_) { started_bgthread_ = true; PthreadCall( "create thread", pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this)); } // If the queue is currently empty, the background thread may currently be // waiting. 
if (queue_.empty()) { PthreadCall("signal", pthread_cond_signal(&bgsignal_)); } // Add to priority queue queue_.push_back(BGItem()); queue_.back().function = function; queue_.back().arg = arg; PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } void PosixEnv::BGThread() { while (true) { // Wait until there is an item that is ready to run PthreadCall("lock", pthread_mutex_lock(&mu_)); while (queue_.empty()) { PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_)); } void (*function)(void*) = queue_.front().function; void* arg = queue_.front().arg; queue_.pop_front(); PthreadCall("unlock", pthread_mutex_unlock(&mu_)); (*function)(arg); } } namespace { struct StartThreadState { void (*user_function)(void*); void* arg; }; } static void* StartThreadWrapper(void* arg) { StartThreadState* state = reinterpret_cast<StartThreadState*>(arg); state->user_function(state->arg); delete state; return NULL; } void PosixEnv::StartThread(void (*function)(void* arg), void* arg) { pthread_t t; StartThreadState* state = new StartThreadState; state->user_function = function; state->arg = arg; PthreadCall("start thread", pthread_create(&t, NULL, &StartThreadWrapper, state)); } } static pthread_once_t once = PTHREAD_ONCE_INIT; static Env* default_env; static void InitDefaultEnv() { default_env = new PosixEnv; } Env* Env::Default() { pthread_once(&once, InitDefaultEnv); return default_env; } }
zzxiaogx-leveldb
util/env_posix.cc
C++
bsd
14,488
// Implementation of leveldb::Status.  The OK status has state_ == NULL;
// otherwise state_ is a heap-allocated record laid out as:
//   state_[0..3] : uint32 length of the message that follows (memcpy'd)
//   state_[4]    : status code (one of the Code enum values)
//   state_[5..]  : message bytes ("msg: msg2" when msg2 is non-empty)
// CopyState duplicates length+5 bytes; ToString prefixes a human-readable
// code name and appends the stored message.  Corpus content below stored
// flattened; code left byte-identical.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <stdio.h> #include "port/port.h" #include "leveldb/status.h" namespace leveldb { const char* Status::CopyState(const char* state) { uint32_t size; memcpy(&size, state, sizeof(size)); char* result = new char[size + 5]; memcpy(result, state, size + 5); return result; } Status::Status(Code code, const Slice& msg, const Slice& msg2) { assert(code != kOk); const uint32_t len1 = msg.size(); const uint32_t len2 = msg2.size(); const uint32_t size = len1 + (len2 ? (2 + len2) : 0); char* result = new char[size + 5]; memcpy(result, &size, sizeof(size)); result[4] = static_cast<char>(code); memcpy(result + 5, msg.data(), len1); if (len2) { result[5 + len1] = ':'; result[6 + len1] = ' '; memcpy(result + 7 + len1, msg2.data(), len2); } state_ = result; } std::string Status::ToString() const { if (state_ == NULL) { return "OK"; } else { char tmp[30]; const char* type; switch (code()) { case kOk: type = "OK"; break; case kNotFound: type = "NotFound: "; break; case kCorruption: type = "Corruption: "; break; case kNotSupported: type = "Not implemented: "; break; case kInvalidArgument: type = "Invalid argument: "; break; case kIOError: type = "IO error: "; break; default: snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", static_cast<int>(code())); type = tmp; break; } std::string result(type); uint32_t length; memcpy(&length, state_, sizeof(length)); result.append(state_ + 5, length); return result; } } }
zzxiaogx-leveldb
util/status.cc
C++
bsd
1,909
// Unit tests for util/crc32c: checks the standard CRC32C vectors from
// RFC 3720 section B.4, that distinct inputs give distinct values, that
// Extend() composes with Value(), and that Mask()/Unmask() round-trip
// (and that masking actually changes the value).  Corpus content below
// stored flattened; code left byte-identical.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/crc32c.h" #include "util/testharness.h" namespace leveldb { namespace crc32c { class CRC { }; TEST(CRC, StandardResults) { // From rfc3720 section B.4. char buf[32]; memset(buf, 0, sizeof(buf)); ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf))); memset(buf, 0xff, sizeof(buf)); ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf))); for (int i = 0; i < 32; i++) { buf[i] = i; } ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf))); for (int i = 0; i < 32; i++) { buf[i] = 31 - i; } ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf))); unsigned char data[48] = { 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data))); } TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); } TEST(CRC, Extend) { ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5)); } TEST(CRC, Mask) { uint32_t crc = Value("foo", 3); ASSERT_NE(crc, Mask(crc)); ASSERT_NE(crc, Mask(Mask(crc))); ASSERT_EQ(crc, Unmask(Mask(crc))); ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc))))); } } } int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
zzxiaogx-leveldb
util/crc32c_test.cc
C++
bsd
1,705
// Shared (platform-independent) pieces of the Env interface: out-of-line
// destructors for the abstract classes, the varargs Log() helper (no-op
// when info_log is NULL), and two convenience helpers:
// WriteStringToFile (creates, appends, closes; deletes the file on any
// failure) and ReadFileToString (reads in 8KB chunks until a short/empty
// read).  Corpus content below stored flattened; code left byte-identical.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/env.h" namespace leveldb { Env::~Env() { } SequentialFile::~SequentialFile() { } RandomAccessFile::~RandomAccessFile() { } WritableFile::~WritableFile() { } Logger::~Logger() { } FileLock::~FileLock() { } void Log(Logger* info_log, const char* format, ...) { if (info_log != NULL) { va_list ap; va_start(ap, format); info_log->Logv(format, ap); va_end(ap); } } Status WriteStringToFile(Env* env, const Slice& data, const std::string& fname) { WritableFile* file; Status s = env->NewWritableFile(fname, &file); if (!s.ok()) { return s; } s = file->Append(data); if (s.ok()) { s = file->Close(); } delete file; // Will auto-close if we did not close above if (!s.ok()) { env->DeleteFile(fname); } return s; } Status ReadFileToString(Env* env, const std::string& fname, std::string* data) { data->clear(); SequentialFile* file; Status s = env->NewSequentialFile(fname, &file); if (!s.ok()) { return s; } static const int kBufferSize = 8192; char* space = new char[kBufferSize]; while (true) { Slice fragment; s = file->Read(kBufferSize, &fragment, space); if (!s.ok()) { break; } data->append(fragment.data(), fragment.size()); if (fragment.empty()) { break; } } delete[] space; delete file; return s; } EnvWrapper::~EnvWrapper() { } }
zzxiaogx-leveldb
util/env.cc
C++
bsd
1,628
// Test utilities: declarations for random string/key/compressible-data
// generators (defined elsewhere), plus ErrorEnv — an EnvWrapper that can
// be switched (writable_file_error_) to fail NewWritableFile with a fake
// IOError while counting the injected failures; all other Env calls pass
// through to Env::Default().  Corpus content below stored flattened; code
// left byte-identical.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" namespace leveldb { namespace test { // Store in *dst a random string of length "len" and return a Slice that // references the generated data. extern Slice RandomString(Random* rnd, int len, std::string* dst); // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). extern std::string RandomKey(Random* rnd, int len); // Store in *dst a string of length "len" that will compress to // "N*compressed_fraction" bytes and return a Slice that references // the generated data. extern Slice CompressibleString(Random* rnd, double compressed_fraction, int len, std::string* dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { public: bool writable_file_error_; int num_writable_file_errors_; ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0) { } virtual Status NewWritableFile(const std::string& fname, WritableFile** result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = NULL; return Status::IOError(fname, "fake error"); } return target()->NewWritableFile(fname, result); } }; } } #endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
zzxiaogx-leveldb
util/testutil.h
C++
bsd
1,714
// Arena: simple bump-pointer allocator.  Allocate() hands out bytes from
// the current block (alloc_ptr_ / alloc_bytes_remaining_) and falls back
// to AllocateFallback() when the request does not fit; AllocateAligned()
// adds malloc-style alignment.  All blocks are new[]'d char arrays tracked
// in blocks_ and freed by the destructor; memory is never returned early.
// MemoryUsage() is an estimate (block bytes + pointer-vector capacity).
// Corpus content below stored flattened; code left byte-identical.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_UTIL_ARENA_H_ #define STORAGE_LEVELDB_UTIL_ARENA_H_ #include <cstddef> #include <vector> #include <assert.h> #include <stdint.h> namespace leveldb { class Arena { public: Arena(); ~Arena(); // Return a pointer to a newly allocated memory block of "bytes" bytes. char* Allocate(size_t bytes); // Allocate memory with the normal alignment guarantees provided by malloc char* AllocateAligned(size_t bytes); // Returns an estimate of the total memory usage of data allocated // by the arena (including space allocated but not yet used for user // allocations). size_t MemoryUsage() const { return blocks_memory_ + blocks_.capacity() * sizeof(char*); } private: char* AllocateFallback(size_t bytes); char* AllocateNewBlock(size_t block_bytes); // Allocation state char* alloc_ptr_; size_t alloc_bytes_remaining_; // Array of new[] allocated memory blocks std::vector<char*> blocks_; // Bytes of memory in blocks allocated so far size_t blocks_memory_; // No copying allowed Arena(const Arena&); void operator=(const Arena&); }; inline char* Arena::Allocate(size_t bytes) { // The semantics of what to return are a bit messy if we allow // 0-byte allocations, so we disallow them here (we don't need // them for our internal use). assert(bytes > 0); if (bytes <= alloc_bytes_remaining_) { char* result = alloc_ptr_; alloc_ptr_ += bytes; alloc_bytes_remaining_ -= bytes; return result; } return AllocateFallback(bytes); } } #endif // STORAGE_LEVELDB_UTIL_ARENA_H_
zzxiaogx-leveldb
util/arena.h
C++
bsd
1,795
// Minimal test harness.  Tester accumulates a failure message in a
// stringstream and aborts the process (exit(1)) from its destructor if any
// assertion failed; the ASSERT_* macros construct a temporary Tester so
// extra context can be streamed with operator<<.  BINARY_OP stamps out the
// IsEq/IsNe/IsGe/IsGt/IsLe/IsLt comparators.  The TEST macro defines a
// subclass of the fixture with a _Run() body and registers it via a global
// bool initializer calling RegisterTest; RunAllTests (filtered by the
// LEVELDB_TESTS env var, per the comment below) drives them.  Macro-heavy
// code left byte-identical; corpus content stored flattened.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_UTIL_TESTHARNESS_H_ #define STORAGE_LEVELDB_UTIL_TESTHARNESS_H_ #include <stdio.h> #include <stdlib.h> #include <sstream> #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" namespace leveldb { namespace test { // Run some of the tests registered by the TEST() macro. If the // environment variable "LEVELDB_TESTS" is not set, runs all tests. // Otherwise, runs only the tests whose name contains the value of // "LEVELDB_TESTS" as a substring. E.g., suppose the tests are: // TEST(Foo, Hello) { ... } // TEST(Foo, World) { ... } // LEVELDB_TESTS=Hello will run the first test // LEVELDB_TESTS=o will run both tests // LEVELDB_TESTS=Junk will run no tests // // Returns 0 if all tests pass. // Dies or returns a non-zero value if some test fails. extern int RunAllTests(); // Return the directory to use for temporary storage. extern std::string TmpDir(); // Return a randomization seed for this run. Typically returns the // same number on repeated invocations of this binary, but automated // runs may be able to vary the seed. extern int RandomSeed(); // An instance of Tester is allocated to hold temporary state during // the execution of an assertion. 
class Tester { private: bool ok_; const char* fname_; int line_; std::stringstream ss_; public: Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) { } ~Tester() { if (!ok_) { fprintf(stderr, "%s:%d:%s\n", fname_, line_, ss_.str().c_str()); exit(1); } } Tester& Is(bool b, const char* msg) { if (!b) { ss_ << " Assertion failure " << msg; ok_ = false; } return *this; } Tester& IsOk(const Status& s) { if (!s.ok()) { ss_ << " " << s.ToString(); ok_ = false; } return *this; } #define BINARY_OP(name,op) \ template <class X, class Y> \ Tester& name(const X& x, const Y& y) { \ if (! (x op y)) { \ ss_ << " failed: " << x << (" " #op " ") << y; \ ok_ = false; \ } \ return *this; \ } BINARY_OP(IsEq, ==) BINARY_OP(IsNe, !=) BINARY_OP(IsGe, >=) BINARY_OP(IsGt, >) BINARY_OP(IsLe, <=) BINARY_OP(IsLt, <) #undef BINARY_OP // Attach the specified value to the error message if an error has occurred template <class V> Tester& operator<<(const V& value) { if (!ok_) { ss_ << " " << value; } return *this; } }; #define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c) #define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s)) #define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b)) #define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b)) #define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b)) #define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b)) #define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b)) #define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b)) #define TCONCAT(a,b) TCONCAT1(a,b) #define TCONCAT1(a,b) a##b #define TEST(base,name) \ class TCONCAT(_Test_,name) : public base { \ public: \ void _Run(); \ static void _RunIt() { \ TCONCAT(_Test_,name) t; \ t._Run(); \ } \ }; \ bool TCONCAT(_Test_ignored_,name) = \ ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_,name)::_RunIt); \ 
void TCONCAT(_Test_,name)::_Run() // Register the specified test. Typically not used directly, but // invoked via the macro expansion of TEST. extern bool RegisterTest(const char* base, const char* name, void (*func)()); } } #endif // STORAGE_LEVELDB_UTIL_TESTHARNESS_H_
zzxiaogx-leveldb
util/testharness.h
C++
bsd
4,666
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/coding.h" #include "util/testharness.h" namespace leveldb { class Coding { }; TEST(Coding, Fixed32) { std::string s; for (uint32_t v = 0; v < 100000; v++) { PutFixed32(&s, v); } const char* p = s.data(); for (uint32_t v = 0; v < 100000; v++) { uint32_t actual = DecodeFixed32(p); ASSERT_EQ(v, actual); p += sizeof(uint32_t); } } TEST(Coding, Fixed64) { std::string s; for (int power = 0; power <= 63; power++) { uint64_t v = static_cast<uint64_t>(1) << power; PutFixed64(&s, v - 1); PutFixed64(&s, v + 0); PutFixed64(&s, v + 1); } const char* p = s.data(); for (int power = 0; power <= 63; power++) { uint64_t v = static_cast<uint64_t>(1) << power; uint64_t actual; actual = DecodeFixed64(p); ASSERT_EQ(v-1, actual); p += sizeof(uint64_t); actual = DecodeFixed64(p); ASSERT_EQ(v+0, actual); p += sizeof(uint64_t); actual = DecodeFixed64(p); ASSERT_EQ(v+1, actual); p += sizeof(uint64_t); } } TEST(Coding, Varint32) { std::string s; for (uint32_t i = 0; i < (32 * 32); i++) { uint32_t v = (i / 32) << (i % 32); PutVarint32(&s, v); } const char* p = s.data(); const char* limit = p + s.size(); for (uint32_t i = 0; i < (32 * 32); i++) { uint32_t expected = (i / 32) << (i % 32); uint32_t actual; const char* start = p; p = GetVarint32Ptr(p, limit, &actual); ASSERT_TRUE(p != NULL); ASSERT_EQ(expected, actual); ASSERT_EQ(VarintLength(actual), p - start); } ASSERT_EQ(p, s.data() + s.size()); } TEST(Coding, Varint64) { // Construct the list of values to check std::vector<uint64_t> values; // Some special values values.push_back(0); values.push_back(100); values.push_back(~static_cast<uint64_t>(0)); values.push_back(~static_cast<uint64_t>(0) - 1); for (uint32_t k = 0; k < 64; k++) { // Test values near powers of two const uint64_t power = 1ull << k; 
values.push_back(power); values.push_back(power-1); values.push_back(power+1); }; std::string s; for (int i = 0; i < values.size(); i++) { PutVarint64(&s, values[i]); } const char* p = s.data(); const char* limit = p + s.size(); for (int i = 0; i < values.size(); i++) { ASSERT_TRUE(p < limit); uint64_t actual; const char* start = p; p = GetVarint64Ptr(p, limit, &actual); ASSERT_TRUE(p != NULL); ASSERT_EQ(values[i], actual); ASSERT_EQ(VarintLength(actual), p - start); } ASSERT_EQ(p, limit); } TEST(Coding, Varint32Overflow) { uint32_t result; std::string input("\x81\x82\x83\x84\x85\x11"); ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result) == NULL); } TEST(Coding, Varint32Truncation) { uint32_t large_value = (1u << 31) + 100; std::string s; PutVarint32(&s, large_value); uint32_t result; for (int len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL); } ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL); ASSERT_EQ(large_value, result); } TEST(Coding, Varint64Overflow) { uint64_t result; std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11"); ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result) == NULL); } TEST(Coding, Varint64Truncation) { uint64_t large_value = (1ull << 63) + 100ull; std::string s; PutVarint64(&s, large_value); uint64_t result; for (int len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL); } ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL); ASSERT_EQ(large_value, result); } TEST(Coding, Strings) { std::string s; PutLengthPrefixedSlice(&s, Slice("")); PutLengthPrefixedSlice(&s, Slice("foo")); PutLengthPrefixedSlice(&s, Slice("bar")); PutLengthPrefixedSlice(&s, Slice(std::string(200, 'x'))); Slice input(s); Slice v; ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v)); ASSERT_EQ("", v.ToString()); 
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v)); ASSERT_EQ("foo", v.ToString()); ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v)); ASSERT_EQ("bar", v.ToString()); ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v)); ASSERT_EQ(std::string(200, 'x'), v.ToString()); ASSERT_EQ("", input.ToString()); } } int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
zzxiaogx-leveldb
util/coding_test.cc
C++
bsd
4,690
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_UTIL_CRC32C_H_
#define STORAGE_LEVELDB_UTIL_CRC32C_H_

#include <stddef.h>
#include <stdint.h>

namespace leveldb {
namespace crc32c {

// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
// crc32c of some string A.  Extend() is often used to maintain the
// crc32c of a stream of data.
extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);

// Return the crc32c of data[0,n-1]
inline uint32_t Value(const char* data, size_t n) {
  return Extend(0, data, n);
}

static const uint32_t kMaskDelta = 0xa282ead8ul;

// Return a masked representation of crc.
//
// Motivation: it is problematic to compute the CRC of a string that
// contains embedded CRCs.  Therefore we recommend that CRCs stored
// somewhere (e.g., in files) should be masked before being stored.
inline uint32_t Mask(uint32_t crc) {
  const uint32_t rotated = (crc >> 15) | (crc << 17);  // rotate right by 15
  return rotated + kMaskDelta;
}

// Return the crc whose masked representation is masked_crc.
inline uint32_t Unmask(uint32_t masked_crc) {
  const uint32_t rot = masked_crc - kMaskDelta;
  return (rot >> 17) | (rot << 15);  // rotate left by 15 undoes Mask's rotate
}

}  // namespace crc32c
}  // namespace leveldb

#endif  // STORAGE_LEVELDB_UTIL_CRC32C_H_
zzxiaogx-leveldb
util/crc32c.h
C++
bsd
1,414
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // Simple hash function used for internal data structures #ifndef STORAGE_LEVELDB_UTIL_HASH_H_ #define STORAGE_LEVELDB_UTIL_HASH_H_ #include <stddef.h> #include <stdint.h> namespace leveldb { extern uint32_t Hash(const char* data, size_t n, uint32_t seed); } #endif // STORAGE_LEVELDB_UTIL_HASH_H_
zzxiaogx-leveldb
util/hash.h
C++
bsd
524
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/arena.h" #include <assert.h> namespace leveldb { static const int kBlockSize = 4096; Arena::Arena() { blocks_memory_ = 0; alloc_ptr_ = NULL; // First allocation will allocate a block alloc_bytes_remaining_ = 0; } Arena::~Arena() { for (size_t i = 0; i < blocks_.size(); i++) { delete[] blocks_[i]; } } char* Arena::AllocateFallback(size_t bytes) { if (bytes > kBlockSize / 4) { // Object is more than a quarter of our block size. Allocate it separately // to avoid wasting too much space in leftover bytes. char* result = AllocateNewBlock(bytes); return result; } // We waste the remaining space in the current block. alloc_ptr_ = AllocateNewBlock(kBlockSize); alloc_bytes_remaining_ = kBlockSize; char* result = alloc_ptr_; alloc_ptr_ += bytes; alloc_bytes_remaining_ -= bytes; return result; } char* Arena::AllocateAligned(size_t bytes) { const int align = sizeof(void*); // We'll align to pointer size assert((align & (align-1)) == 0); // Pointer size should be a power of 2 size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1); size_t slop = (current_mod == 0 ? 0 : align - current_mod); size_t needed = bytes + slop; char* result; if (needed <= alloc_bytes_remaining_) { result = alloc_ptr_ + slop; alloc_ptr_ += needed; alloc_bytes_remaining_ -= needed; } else { // AllocateFallback always returned aligned memory result = AllocateFallback(bytes); } assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0); return result; } char* Arena::AllocateNewBlock(size_t block_bytes) { char* result = new char[block_bytes]; blocks_memory_ += block_bytes; blocks_.push_back(result); return result; } }
zzxiaogx-leveldb
util/arena.cc
C++
bsd
1,971
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #include "port/port.h" namespace leveldb { // Helper class that locks a mutex on construction and unlocks the mutex when // the destructor of the MutexLock object is invoked. // // Typical usage: // // void MyClass::MyMethod() { // MutexLock l(&mu_); // mu_ is an instance variable // ... some complex code, possibly with multiple return paths ... // } class MutexLock { public: explicit MutexLock(port::Mutex *mu) : mu_(mu) { this->mu_->Lock(); } ~MutexLock() { this->mu_->Unlock(); } private: port::Mutex *const mu_; // No copying allowed MutexLock(const MutexLock&); void operator=(const MutexLock&); }; } #endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
zzxiaogx-leveldb
util/mutexlock.h
C++
bsd
1,001
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A portable implementation of crc32c, optimized to handle
// four bytes at a time.

#include "util/crc32c.h"

#include <stdint.h>

#include "util/coding.h"

namespace leveldb {
namespace crc32c {

// Precomputed lookup tables.  table0_ is the classic byte-at-a-time
// CRC32C table; table1_..table3_ hold the CRC contribution of a byte
// that sits 1..3 positions earlier in a 4-byte group, which lets
// STEP4 below fold four input bytes per iteration (slicing-by-4 style).
static const uint32_t table0_[256] = {
  0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
  0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
  0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
  0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
  0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
  0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
  0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
  0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
  0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
  0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
  0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
  0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
  0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
  0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
  0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
  0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
  0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
  0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
  0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
  0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
  0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
  0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
  0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
  0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
  0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
  0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
  0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
  0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
  0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
  0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
  0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
  0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
};
static const uint32_t table1_[256] = {
  0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899, 0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
  0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21, 0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
  0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918, 0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
  0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0, 0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
  0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b, 0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
  0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823, 0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
  0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a, 0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
  0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2, 0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
  0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d, 0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41,
  0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25, 0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9,
  0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c, 0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0,
  0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4, 0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78,
  0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f, 0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43,
  0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27, 0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb,
  0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e, 0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2,
  0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6, 0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a,
  0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260, 0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc,
  0x66d73941, 0x7575a136, 0x419209af, 0x523091d8, 0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004,
  0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1, 0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d,
  0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059, 0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185,
  0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162, 0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be,
  0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da, 0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306,
  0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3, 0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f,
  0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b, 0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287,
  0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464, 0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8,
  0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc, 0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600,
  0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5, 0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439,
  0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d, 0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781,
  0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766, 0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba,
  0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de, 0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502,
  0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7, 0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b,
  0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f, 0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483
};
static const uint32_t table2_[256] = {
  0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073, 0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469,
  0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6, 0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac,
  0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9, 0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3,
  0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c, 0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726,
  0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67, 0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d,
  0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2, 0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8,
  0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed, 0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7,
  0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828, 0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32,
  0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa, 0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0,
  0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f, 0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75,
  0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20, 0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a,
  0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5, 0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff,
  0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe, 0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4,
  0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b, 0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161,
  0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634, 0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e,
  0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1, 0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb,
  0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730, 0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a,
  0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5, 0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def,
  0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba, 0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0,
  0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f, 0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065,
  0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24, 0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e,
  0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1, 0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb,
  0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae, 0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4,
  0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b, 0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71,
  0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9, 0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3,
  0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c, 0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36,
  0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63, 0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79,
  0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6, 0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc,
  0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd, 0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7,
  0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238, 0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622,
  0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177, 0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d,
  0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2, 0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8
};
static const uint32_t table3_[256] = {
  0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939, 0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca,
  0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf, 0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c,
  0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804, 0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7,
  0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2, 0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11,
  0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2, 0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41,
  0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54, 0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7,
  0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f, 0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c,
  0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69, 0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a,
  0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de, 0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d,
  0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538, 0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb,
  0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3, 0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610,
  0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405, 0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6,
  0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255, 0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6,
  0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3, 0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040,
  0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368, 0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b,
  0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e, 0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d,
  0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006, 0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5,
  0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0, 0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213,
  0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b, 0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8,
  0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd, 0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e,
  0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d, 0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e,
  0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b, 0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698,
  0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0, 0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443,
  0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656, 0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5,
  0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1, 0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12,
  0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07, 0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4,
  0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc, 0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f,
  0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a, 0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9,
  0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a, 0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99,
  0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c, 0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f,
  0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57, 0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4,
  0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1, 0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842
};

// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
static inline uint32_t LE_LOAD32(const uint8_t *p) {
  return DecodeFixed32(reinterpret_cast<const char*>(p));
}

// Extends "crc" (the CRC32C of some prefix) with the bytes buf[0,size-1].
uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
  const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
  const uint8_t *e = p + size;
  uint32_t l = crc ^ 0xffffffffu;  // CRC32C works on the inverted value.

// Consume one byte via the classic table lookup.
#define STEP1 do {                              \
    int c = (l & 0xff) ^ *p++;                  \
    l = table0_[c] ^ (l >> 8);                  \
} while (0)
// Consume four bytes at once: XOR a little-endian word into the running
// value, then combine one lookup from each of the four tables.
#define STEP4 do {                              \
    uint32_t c = l ^ LE_LOAD32(p);              \
    p += 4;                                     \
    l = table3_[c & 0xff] ^                     \
        table2_[(c >> 8) & 0xff] ^              \
        table1_[(c >> 16) & 0xff] ^             \
        table0_[c >> 24];                       \
} while (0)

  // Point x at first 4-byte aligned byte in string.  This might be
  // just past the end of the string.
  const uintptr_t pval = reinterpret_cast<uintptr_t>(p);
  const uint8_t* x = reinterpret_cast<const uint8_t*>(((pval + 3) >> 2) << 2);
  if (x <= e) {
    // Process bytes until finished or p is 4-byte aligned
    while (p != x) {
      STEP1;
    }
  }
  // Process bytes 16 at a time
  while ((e-p) >= 16) {
    STEP4; STEP4; STEP4; STEP4;
  }
  // Process bytes 4 at a time
  while ((e-p) >= 4) {
    STEP4;
  }
  // Process the last few bytes
  while (p != e) {
    STEP1;
  }
#undef STEP4
#undef STEP1
  return l ^ 0xffffffffu;  // Undo the initial inversion.
}

}  // namespace crc32c
}  // namespace leveldb
zzxiaogx-leveldb
util/crc32c.cc
C++
bsd
14,934
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // Endian-neutral encoding: // * Fixed-length numbers are encoded with least-significant byte first // * In addition we support variable length "varint" encoding // * Strings are encoded prefixed by their length in varint format #ifndef STORAGE_LEVELDB_UTIL_CODING_H_ #define STORAGE_LEVELDB_UTIL_CODING_H_ #include <stdint.h> #include <string.h> #include <string> #include "leveldb/slice.h" #include "port/port.h" namespace leveldb { // Standard Put... routines append to a string extern void PutFixed32(std::string* dst, uint32_t value); extern void PutFixed64(std::string* dst, uint64_t value); extern void PutVarint32(std::string* dst, uint32_t value); extern void PutVarint64(std::string* dst, uint64_t value); extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value); // Standard Get... routines parse a value from the beginning of a Slice // and advance the slice past the parsed value. extern bool GetVarint32(Slice* input, uint32_t* value); extern bool GetVarint64(Slice* input, uint64_t* value); extern bool GetLengthPrefixedSlice(Slice* input, Slice* result); // Pointer-based variants of GetVarint... These either store a value // in *v and return a pointer just past the parsed value, or return // NULL on error. These routines only look at bytes in the range // [p..limit-1] extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v); extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v); // Returns the length of the varint32 or varint64 encoding of "v" extern int VarintLength(uint64_t v); // Lower-level versions of Put... 
that write directly into a character buffer // REQUIRES: dst has enough space for the value being written extern void EncodeFixed32(char* dst, uint32_t value); extern void EncodeFixed64(char* dst, uint64_t value); // Lower-level versions of Put... that write directly into a character buffer // and return a pointer just past the last byte written. // REQUIRES: dst has enough space for the value being written extern char* EncodeVarint32(char* dst, uint32_t value); extern char* EncodeVarint64(char* dst, uint64_t value); // Lower-level versions of Get... that read directly from a character buffer // without any bounds checking. inline uint32_t DecodeFixed32(const char* ptr) { if (port::kLittleEndian) { // Load the raw bytes uint32_t result; memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load return result; } else { return ((static_cast<uint32_t>(ptr[0])) | (static_cast<uint32_t>(ptr[1]) << 8) | (static_cast<uint32_t>(ptr[2]) << 16) | (static_cast<uint32_t>(ptr[3]) << 24)); } } inline uint64_t DecodeFixed64(const char* ptr) { if (port::kLittleEndian) { // Load the raw bytes uint64_t result; memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load return result; } else { uint64_t lo = DecodeFixed32(ptr); uint64_t hi = DecodeFixed32(ptr + 4); return (hi << 32) | lo; } } // Internal routine for use by fallback path of GetVarint32Ptr extern const char* GetVarint32PtrFallback(const char* p, const char* limit, uint32_t* value); inline const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* value) { if (p < limit) { uint32_t result = *(reinterpret_cast<const unsigned char*>(p)); if ((result & 128) == 0) { *value = result; return p + 1; } } return GetVarint32PtrFallback(p, limit, value); } } #endif // STORAGE_LEVELDB_UTIL_CODING_H_
zzxiaogx-leveldb
util/coding.h
C++
bsd
3,928
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <algorithm> #include <stdint.h> #include "leveldb/comparator.h" #include "leveldb/slice.h" #include "util/logging.h" namespace leveldb { Comparator::~Comparator() { } namespace { class BytewiseComparatorImpl : public Comparator { public: BytewiseComparatorImpl() { } virtual const char* Name() const { return "leveldb.BytewiseComparator"; } virtual int Compare(const Slice& a, const Slice& b) const { return a.compare(b); } virtual void FindShortestSeparator( std::string* start, const Slice& limit) const { // Find length of common prefix size_t min_length = std::min(start->size(), limit.size()); size_t diff_index = 0; while ((diff_index < min_length) && ((*start)[diff_index] == limit[diff_index])) { diff_index++; } if (diff_index >= min_length) { // Do not shorten if one string is a prefix of the other } else { uint8_t diff_byte = static_cast<uint8_t>((*start)[diff_index]); if (diff_byte < static_cast<uint8_t>(0xff) && diff_byte + 1 < static_cast<uint8_t>(limit[diff_index])) { (*start)[diff_index]++; start->resize(diff_index + 1); assert(Compare(*start, limit) < 0); } } } virtual void FindShortSuccessor(std::string* key) const { // Find first character that can be incremented size_t n = key->size(); for (size_t i = 0; i < n; i++) { const uint8_t byte = (*key)[i]; if (byte != static_cast<uint8_t>(0xff)) { (*key)[i] = byte + 1; key->resize(i+1); return; } } // *key is a run of 0xffs. Leave it alone. } }; } static const BytewiseComparatorImpl bytewise; const Comparator* BytewiseComparator() { return &bytewise; } }
zzxiaogx-leveldb
util/comparator.cc
C++
bsd
1,980
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <assert.h> #include <stdio.h> #include <stdlib.h> #include "leveldb/cache.h" #include "port/port.h" #include "util/hash.h" #include "util/mutexlock.h" namespace leveldb { Cache::~Cache() { } namespace { // LRU cache implementation // An entry is a variable length heap-allocated structure. Entries // are kept in a circular doubly linked list ordered by access time. struct LRUHandle { void* value; void (*deleter)(const Slice&, void* value); LRUHandle* next_hash; LRUHandle* next; LRUHandle* prev; size_t charge; // TODO(opt): Only allow uint32_t? size_t key_length; uint32_t refs; uint32_t hash; // Hash of key(); used for fast sharding and comparisons char key_data[1]; // Beginning of key Slice key() const { // For cheaper lookups, we allow a temporary Handle object // to store a pointer to a key in "value". if (next == this) { return *(reinterpret_cast<Slice*>(value)); } else { return Slice(key_data, key_length); } } }; // We provide our own simple hash table since it removes a whole bunch // of porting hacks and is also faster than some of the built-in hash // table implementations in some of the compiler/runtime combinations // we have tested. E.g., readrandom speeds up by ~5% over the g++ // 4.4.3's builtin hashtable. class HandleTable { public: HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); } ~HandleTable() { delete[] list_; } LRUHandle* Lookup(const Slice& key, uint32_t hash) { return *FindPointer(key, hash); } LRUHandle* Insert(LRUHandle* h) { LRUHandle** ptr = FindPointer(h->key(), h->hash); LRUHandle* old = *ptr; h->next_hash = (old == NULL ? 
NULL : old->next_hash); *ptr = h; if (old == NULL) { ++elems_; if (elems_ > length_) { // Since each cache entry is fairly large, we aim for a small // average linked list length (<= 1). Resize(); } } return old; } LRUHandle* Remove(const Slice& key, uint32_t hash) { LRUHandle** ptr = FindPointer(key, hash); LRUHandle* result = *ptr; if (result != NULL) { *ptr = result->next_hash; --elems_; } return result; } private: // The table consists of an array of buckets where each bucket is // a linked list of cache entries that hash into the bucket. uint32_t length_; uint32_t elems_; LRUHandle** list_; // Return a pointer to slot that points to a cache entry that // matches key/hash. If there is no such cache entry, return a // pointer to the trailing slot in the corresponding linked list. LRUHandle** FindPointer(const Slice& key, uint32_t hash) { LRUHandle** ptr = &list_[hash & (length_ - 1)]; while (*ptr != NULL && ((*ptr)->hash != hash || key != (*ptr)->key())) { ptr = &(*ptr)->next_hash; } return ptr; } void Resize() { uint32_t new_length = 4; while (new_length < elems_) { new_length *= 2; } LRUHandle** new_list = new LRUHandle*[new_length]; memset(new_list, 0, sizeof(new_list[0]) * new_length); uint32_t count = 0; for (uint32_t i = 0; i < length_; i++) { LRUHandle* h = list_[i]; while (h != NULL) { LRUHandle* next = h->next_hash; Slice key = h->key(); uint32_t hash = h->hash; LRUHandle** ptr = &new_list[hash & (new_length - 1)]; h->next_hash = *ptr; *ptr = h; h = next; count++; } } assert(elems_ == count); delete[] list_; list_ = new_list; length_ = new_length; } }; // A single shard of sharded cache. class LRUCache { public: LRUCache(); ~LRUCache(); // Separate from constructor so caller can easily make an array of LRUCache void SetCapacity(size_t capacity) { capacity_ = capacity; } // Like Cache methods, but with an extra "hash" parameter. 
Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)); Cache::Handle* Lookup(const Slice& key, uint32_t hash); void Release(Cache::Handle* handle); void Erase(const Slice& key, uint32_t hash); private: void LRU_Remove(LRUHandle* e); void LRU_Append(LRUHandle* e); void Unref(LRUHandle* e); // Initialized before use. size_t capacity_; // mutex_ protects the following state. port::Mutex mutex_; size_t usage_; uint64_t last_id_; // Dummy head of LRU list. // lru.prev is newest entry, lru.next is oldest entry. LRUHandle lru_; HandleTable table_; }; LRUCache::LRUCache() : usage_(0), last_id_(0) { // Make empty circular linked list lru_.next = &lru_; lru_.prev = &lru_; } LRUCache::~LRUCache() { for (LRUHandle* e = lru_.next; e != &lru_; ) { LRUHandle* next = e->next; assert(e->refs == 1); // Error if caller has an unreleased handle Unref(e); e = next; } } void LRUCache::Unref(LRUHandle* e) { assert(e->refs > 0); e->refs--; if (e->refs <= 0) { usage_ -= e->charge; (*e->deleter)(e->key(), e->value); free(e); } } void LRUCache::LRU_Remove(LRUHandle* e) { e->next->prev = e->prev; e->prev->next = e->next; } void LRUCache::LRU_Append(LRUHandle* e) { // Make "e" newest entry by inserting just before lru_ e->next = &lru_; e->prev = lru_.prev; e->prev->next = e; e->next->prev = e; } Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) { MutexLock l(&mutex_); LRUHandle* e = table_.Lookup(key, hash); if (e != NULL) { e->refs++; LRU_Remove(e); LRU_Append(e); } return reinterpret_cast<Cache::Handle*>(e); } void LRUCache::Release(Cache::Handle* handle) { MutexLock l(&mutex_); Unref(reinterpret_cast<LRUHandle*>(handle)); } Cache::Handle* LRUCache::Insert( const Slice& key, uint32_t hash, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) { MutexLock l(&mutex_); LRUHandle* e = reinterpret_cast<LRUHandle*>( malloc(sizeof(LRUHandle)-1 + key.size())); e->value = value; 
e->deleter = deleter; e->charge = charge; e->key_length = key.size(); e->hash = hash; e->refs = 2; // One from LRUCache, one for the returned handle memcpy(e->key_data, key.data(), key.size()); LRU_Append(e); usage_ += charge; LRUHandle* old = table_.Insert(e); if (old != NULL) { LRU_Remove(old); Unref(old); } while (usage_ > capacity_ && lru_.next != &lru_) { LRUHandle* old = lru_.next; LRU_Remove(old); table_.Remove(old->key(), old->hash); Unref(old); } return reinterpret_cast<Cache::Handle*>(e); } void LRUCache::Erase(const Slice& key, uint32_t hash) { MutexLock l(&mutex_); LRUHandle* e = table_.Remove(key, hash); if (e != NULL) { LRU_Remove(e); Unref(e); } } static const int kNumShardBits = 4; static const int kNumShards = 1 << kNumShardBits; class ShardedLRUCache : public Cache { private: LRUCache shard_[kNumShards]; port::Mutex id_mutex_; uint64_t last_id_; static inline uint32_t HashSlice(const Slice& s) { return Hash(s.data(), s.size(), 0); } static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); } public: explicit ShardedLRUCache(size_t capacity) : last_id_(0) { const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards; for (int s = 0; s < kNumShards; s++) { shard_[s].SetCapacity(per_shard); } } virtual ~ShardedLRUCache() { } virtual Handle* Insert(const Slice& key, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) { const uint32_t hash = HashSlice(key); return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter); } virtual Handle* Lookup(const Slice& key) { const uint32_t hash = HashSlice(key); return shard_[Shard(hash)].Lookup(key, hash); } virtual void Release(Handle* handle) { LRUHandle* h = reinterpret_cast<LRUHandle*>(handle); shard_[Shard(h->hash)].Release(handle); } virtual void Erase(const Slice& key) { const uint32_t hash = HashSlice(key); shard_[Shard(hash)].Erase(key, hash); } virtual void* Value(Handle* handle) { return reinterpret_cast<LRUHandle*>(handle)->value; } virtual 
uint64_t NewId() { MutexLock l(&id_mutex_); return ++(last_id_); } }; } // end anonymous namespace Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); } }
zzxiaogx-leveldb
util/cache.cc
C++
bsd
8,712
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/testutil.h" #include "util/random.h" namespace leveldb { namespace test { Slice RandomString(Random* rnd, int len, std::string* dst) { dst->resize(len); for (int i = 0; i < len; i++) { (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~' } return Slice(*dst); } std::string RandomKey(Random* rnd, int len) { // Make sure to generate a wide variety of characters so we // test the boundary conditions for short-key optimizations. static const char kTestChars[] = { '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff' }; std::string result; for (int i = 0; i < len; i++) { result += kTestChars[rnd->Uniform(sizeof(kTestChars))]; } return result; } extern Slice CompressibleString(Random* rnd, double compressed_fraction, int len, std::string* dst) { int raw = static_cast<int>(len * compressed_fraction); if (raw < 1) raw = 1; std::string raw_data; RandomString(rnd, raw, &raw_data); // Duplicate the random data until we have filled "len" bytes dst->clear(); while (dst->size() < len) { dst->append(raw_data); } dst->resize(len); return Slice(*dst); } } }
zzxiaogx-leveldb
util/testutil.cc
C++
bsd
1,413
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // Must not be included from any .h files to avoid polluting the namespace // with macros. #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ #include <stdio.h> #include <stdint.h> #include <string> #include "port/port.h" namespace leveldb { class Slice; class WritableFile; // Append a human-readable printout of "num" to *str extern void AppendNumberTo(std::string* str, uint64_t num); // Append a human-readable printout of "value" to *str. // Escapes any non-printable characters found in "value". extern void AppendEscapedStringTo(std::string* str, const Slice& value); // Return a human-readable printout of "num" extern std::string NumberToString(uint64_t num); // Return a human-readable version of "value". // Escapes any non-printable characters found in "value". extern std::string EscapeString(const Slice& value); // If *in starts with "c", advances *in past the first character and // returns true. Otherwise, returns false. extern bool ConsumeChar(Slice* in, char c); // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an // unspecified state. extern bool ConsumeDecimalNumber(Slice* in, uint64_t* val); } #endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
zzxiaogx-leveldb
util/logging.h
C++
bsd
1,572
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/arena.h" #include "util/random.h" #include "util/testharness.h" namespace leveldb { class ArenaTest { }; TEST(ArenaTest, Empty) { Arena arena; } TEST(ArenaTest, Simple) { std::vector<std::pair<size_t, char*> > allocated; Arena arena; const int N = 100000; size_t bytes = 0; Random rnd(301); for (int i = 0; i < N; i++) { size_t s; if (i % (N / 10) == 0) { s = i; } else { s = rnd.OneIn(4000) ? rnd.Uniform(6000) : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20)); } if (s == 0) { // Our arena disallows size 0 allocations. s = 1; } char* r; if (rnd.OneIn(10)) { r = arena.AllocateAligned(s); } else { r = arena.Allocate(s); } for (int b = 0; b < s; b++) { // Fill the "i"th allocation with a known bit pattern r[b] = i % 256; } bytes += s; allocated.push_back(std::make_pair(s, r)); ASSERT_GE(arena.MemoryUsage(), bytes); if (i > N/10) { ASSERT_LE(arena.MemoryUsage(), bytes * 1.10); } } for (int i = 0; i < allocated.size(); i++) { size_t num_bytes = allocated[i].first; const char* p = allocated[i].second; for (int b = 0; b < num_bytes; b++) { // Check the "i"th allocation for the known bit pattern ASSERT_EQ(int(p[b]) & 0xff, i % 256); } } } } int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
zzxiaogx-leveldb
util/arena_test.cc
C++
bsd
1,648
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <math.h>
#include <stdio.h>
#include "port/port.h"
#include "util/histogram.h"

namespace leveldb {

// Bucket upper bounds: roughly exponentially spaced, finishing with a
// catch-all 1e200 so every value falls into some bucket.
const double Histogram::kBucketLimit[kNumBuckets] = {
  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
  50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
  500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
  3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
  16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
  70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
  250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
  900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
  3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
  9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
  25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
  70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
  180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
  450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
  1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
  2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
  5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
  1e200,
};

// Resets all statistics. min_ starts at the largest bucket limit so the
// first Add() always lowers it.
void Histogram::Clear() {
  min_ = kBucketLimit[kNumBuckets-1];
  max_ = 0;
  num_ = 0;
  sum_ = 0;
  sum_squares_ = 0;
  for (int i = 0; i < kNumBuckets; i++) {
    buckets_[i] = 0;
  }
}

// Records one sample: finds the first bucket whose limit exceeds the
// value and updates the running min/max/count/sum/sum-of-squares.
void Histogram::Add(double value) {
  // Linear search is fast enough for our usage in db_bench
  int b = 0;
  while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) {
    b++;
  }
  buckets_[b] += 1.0;
  if (min_ > value) min_ = value;
  if (max_ < value) max_ = value;
  num_++;
  sum_ += value;
  sum_squares_ += (value * value);
}

// Folds another histogram's samples into this one.
void Histogram::Merge(const Histogram& other) {
  if (other.min_ < min_) min_ = other.min_;
  if (other.max_ > max_) max_ = other.max_;
  num_ += other.num_;
  sum_ += other.sum_;
  sum_squares_ += other.sum_squares_;
  for (int b = 0; b < kNumBuckets; b++) {
    buckets_[b] += other.buckets_[b];
  }
}

double Histogram::Median() const {
  return Percentile(50.0);
}

// Estimates the p-th percentile (p in [0,100]) by locating the bucket
// containing the p-th sample and interpolating linearly inside it,
// clamped to the observed [min_, max_] range.
double Histogram::Percentile(double p) const {
  double threshold = num_ * (p / 100.0);
  double sum = 0;
  for (int b = 0; b < kNumBuckets; b++) {
    sum += buckets_[b];
    if (sum >= threshold) {
      // Scale linearly within this bucket
      double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
      double right_point = kBucketLimit[b];
      double left_sum = sum - buckets_[b];
      double right_sum = sum;
      double pos = (threshold - left_sum) / (right_sum - left_sum);
      double r = left_point + (right_point - left_point) * pos;
      if (r < min_) r = min_;
      if (r > max_) r = max_;
      return r;
    }
  }
  return max_;
}

double Histogram::Average() const {
  if (num_ == 0.0) return 0;
  return sum_ / num_;
}

// Population standard deviation computed from the running sums.
double Histogram::StandardDeviation() const {
  if (num_ == 0.0) return 0;
  double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_);
  return sqrt(variance);
}

// Renders summary statistics plus a per-bucket table with percentage,
// cumulative percentage, and an ASCII bar (20 '#' marks == 100%).
// NOTE(review): mult is 100/num_; if num_ is 0 this divides by zero —
// callers appear to only print non-empty histograms; confirm.
std::string Histogram::ToString() const {
  std::string r;
  char buf[200];
  snprintf(buf, sizeof(buf),
           "Count: %.0f Average: %.4f StdDev: %.2f\n",
           num_, Average(), StandardDeviation());
  r.append(buf);
  snprintf(buf, sizeof(buf),
           "Min: %.4f Median: %.4f Max: %.4f\n",
           (num_ == 0.0 ? 0.0 : min_), Median(), max_);
  r.append(buf);
  r.append("------------------------------------------------------\n");
  const double mult = 100.0 / num_;
  double sum = 0;
  for (int b = 0; b < kNumBuckets; b++) {
    if (buckets_[b] <= 0.0) continue;
    sum += buckets_[b];
    snprintf(buf, sizeof(buf),
             "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
             ((b == 0) ? 0.0 : kBucketLimit[b-1]),  // left
             kBucketLimit[b],                       // right
             buckets_[b],                           // count
             mult * buckets_[b],                    // percentage
             mult * sum);                           // cumulative percentage
    r.append(buf);

    // Add hash marks based on percentage; 20 marks for 100%.
    int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5);
    r.append(marks, '#');
    r.push_back('\n');
  }
  return r;
}

}
zzxiaogx-leveldb
util/histogram.cc
C++
bsd
4,707
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/testharness.h" #include <string> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> namespace leveldb { namespace test { namespace { struct Test { const char* base; const char* name; void (*func)(); }; std::vector<Test>* tests; } bool RegisterTest(const char* base, const char* name, void (*func)()) { if (tests == NULL) { tests = new std::vector<Test>; } Test t; t.base = base; t.name = name; t.func = func; tests->push_back(t); return true; } int RunAllTests() { const char* matcher = getenv("LEVELDB_TESTS"); int num = 0; if (tests != NULL) { for (int i = 0; i < tests->size(); i++) { const Test& t = (*tests)[i]; if (matcher != NULL) { std::string name = t.base; name.push_back('.'); name.append(t.name); if (strstr(name.c_str(), matcher) == NULL) { continue; } } fprintf(stderr, "==== Test %s.%s\n", t.base, t.name); (*t.func)(); ++num; } } fprintf(stderr, "==== PASSED %d tests\n", num); return 0; } std::string TmpDir() { std::string dir; Status s = Env::Default()->GetTestDirectory(&dir); ASSERT_TRUE(s.ok()) << s.ToString(); return dir; } int RandomSeed() { const char* env = getenv("TEST_RANDOM_SEED"); int result = (env != NULL ? atoi(env) : 301); if (result <= 0) { result = 301; } return result; } } }
zzxiaogx-leveldb
util/testharness.cc
C++
bsd
1,624
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/options.h" #include "leveldb/comparator.h" #include "leveldb/env.h" namespace leveldb { Options::Options() : comparator(BytewiseComparator()), create_if_missing(false), error_if_exists(false), paranoid_checks(false), env(Env::Default()), info_log(NULL), write_buffer_size(4<<20), max_open_files(1000), block_cache(NULL), block_size(4096), block_restart_interval(16), compression(kSnappyCompression) { } }
zzxiaogx-leveldb
util/options.cc
C++
bsd
715
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_UTIL_HISTOGRAM_H_ #define STORAGE_LEVELDB_UTIL_HISTOGRAM_H_ #include <string> namespace leveldb { class Histogram { public: Histogram() { } ~Histogram() { } void Clear(); void Add(double value); void Merge(const Histogram& other); std::string ToString() const; private: double min_; double max_; double num_; double sum_; double sum_squares_; enum { kNumBuckets = 154 }; static const double kBucketLimit[kNumBuckets]; double buckets_[kNumBuckets]; double Median() const; double Percentile(double p) const; double Average() const; double StandardDeviation() const; }; } #endif // STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
zzxiaogx-leveldb
util/histogram.h
C++
bsd
903
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/logging.h" #include <errno.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include "leveldb/env.h" #include "leveldb/slice.h" namespace leveldb { void AppendNumberTo(std::string* str, uint64_t num) { char buf[30]; snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num); str->append(buf); } void AppendEscapedStringTo(std::string* str, const Slice& value) { for (size_t i = 0; i < value.size(); i++) { char c = value[i]; if (c >= ' ' && c <= '~') { str->push_back(c); } else { char buf[10]; snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned int>(c) & 0xff); str->append(buf); } } } std::string NumberToString(uint64_t num) { std::string r; AppendNumberTo(&r, num); return r; } std::string EscapeString(const Slice& value) { std::string r; AppendEscapedStringTo(&r, value); return r; } bool ConsumeChar(Slice* in, char c) { if (!in->empty() && (*in)[0] == c) { in->remove_prefix(1); return true; } else { return false; } } bool ConsumeDecimalNumber(Slice* in, uint64_t* val) { uint64_t v = 0; int digits = 0; while (!in->empty()) { char c = (*in)[0]; if (c >= '0' && c <= '9') { ++digits; const int delta = (c - '0'); static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0); if (v > kMaxUint64/10 || (v == kMaxUint64/10 && delta > kMaxUint64%10)) { // Overflow return false; } v = (v * 10) + delta; in->remove_prefix(1); } else { break; } } *val = v; return (digits > 0); } }
zzxiaogx-leveldb
util/logging.cc
C++
bsd
1,847
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sanjay@google.com (Sanjay Ghemawat)
//
// Logger implementation that can be shared by all environments
// where enough posix functionality is available.

#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_

#include <algorithm>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include "leveldb/env.h"

namespace leveldb {

// Writes log records to a FILE*, prefixing each with a local timestamp
// (microsecond precision) and the caller's thread id in hex.
// NOTE(review): uses assert and va_list but includes neither <assert.h>
// nor <stdarg.h> directly — presumably supplied transitively; confirm.
class PosixLogger : public Logger {
 private:
  FILE* file_;               // Owned; closed by the destructor.
  uint64_t (*gettid_)();  // Return the thread id for the current thread
 public:
  PosixLogger(FILE* f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { }
  virtual ~PosixLogger() {
    fclose(file_);
  }
  virtual void Logv(const char* format, va_list ap) {
    const uint64_t thread_id = (*gettid_)();

    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      // Timestamp prefix: local wall-clock time plus thread id.
      struct timeval now_tv;
      gettimeofday(&now_tv, NULL);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      localtime_r(&seconds, &t);
      p += snprintf(p, limit - p,
                    "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
                    t.tm_year + 1900,
                    t.tm_mon + 1,
                    t.tm_mday,
                    t.tm_hour,
                    t.tm_min,
                    t.tm_sec,
                    static_cast<int>(now_tv.tv_usec),
                    static_cast<long long unsigned int>(thread_id));

      // Print the message. Note: snprintf/vsnprintf return the length
      // that WOULD have been written, so p may land past limit.
      if (p < limit) {
        va_list backup_ap;
        // va_copy so the caller's ap survives a possible second pass.
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;       // Try again with larger buffer
        } else {
          p = limit - 1;  // Give up: truncate within the big buffer.
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      fwrite(base, 1, p - base, file_);
      fflush(file_);
      if (base != buffer) {
        delete[] base;  // Only the second-pass buffer is heap-allocated.
      }
      break;
    }
  }
};

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
zzxiaogx-leveldb
util/posix_logger.h
C++
bsd
2,605
/* Stylesheet for the LevelDB HTML documentation. */

body { margin-left: 0.5in; margin-right: 0.5in; background: white; color: black; }

/* Headings hang slightly into the left margin. */
h1 { margin-left: -0.2in; font-size: 14pt; }
h2 { margin-left: -0in; font-size: 12pt; }
h3 { margin-left: -0in; }
h4 { margin-left: -0in; }
hr { margin-left: -0in; }

/* Definition lists: definition term bold */
dt { font-weight: bold; }

address { text-align: center; }

/* Inline code, sample output, and variables render in blue. */
code,samp,var { color: blue; }
kbd { color: #600000; }

/* Side notes float right in a bordered box. */
div.note p { float: right; width: 3in; margin-right: 0%; padding: 1px; border: 2px solid #6060a0; background-color: #fffff0; }

ul { margin-top: -0em; margin-bottom: -0em; }
ol { margin-top: -0em; margin-bottom: -0em; }

UL.nobullets { list-style-type: none; list-style-image: none; margin-left: -1em; }

p { margin: 1em 0 1em 0; padding: 0 0 0 0; }

pre { line-height: 1.3em; padding: 0.4em 0 0.8em 0; margin: 0 0 0 0; border: 0 0 0 0; color: blue; }

/* Centered tables of benchmark numbers; cells right-aligned. */
.datatable { margin-left: auto; margin-right: auto; margin-top: 2em; margin-bottom: 2em; border: 1px solid; }
.datatable td,th { padding: 0 0.5em 0 0.5em; text-align: right; }
zzxiaogx-leveldb
doc/doc.css
CSS
bsd
1,127
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <stdio.h> #include <stdlib.h> #include <kcpolydb.h> #include "util/histogram.h" #include "util/random.h" #include "util/testutil.h" // Comma-separated list of operations to run in the specified order // Actual benchmarks: // // fillseq -- write N values in sequential key order in async mode // fillrandom -- write N values in random key order in async mode // overwrite -- overwrite N values in random key order in async mode // fillseqsync -- write N/100 values in sequential key order in sync mode // fillrandsync -- write N/100 values in random key order in sync mode // fillrand100K -- write N/1000 100K values in random order in async mode // fillseq100K -- write N/1000 100K values in seq order in async mode // readseq -- read N times sequentially // readseq100K -- read N/1000 100K values in sequential order in async mode // readrand100K -- read N/1000 100K values in sequential order in async mode // readrandom -- read N times in random order static const char* FLAGS_benchmarks = "fillseq," "fillseqsync," "fillrandsync," "fillrandom," "overwrite," "readrandom," "readseq," "fillrand100K," "fillseq100K," "readseq100K," "readrand100K," ; // Number of key/values to place in database static int FLAGS_num = 1000000; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Size of each value static int FLAGS_value_size = 100; // Arrange to generate values that shrink to this fraction of // their original size after compression static double FLAGS_compression_ratio = 0.5; // Print histogram of operation timings static bool FLAGS_histogram = false; // Cache size. Default 4 MB static int FLAGS_cache_size = 4194304; // Page size. 
Default 1 KB static int FLAGS_page_size = 1024; // If true, do not destroy the existing database. If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. static bool FLAGS_use_existing_db = false; // Compression flag. If true, compression is on. If false, compression // is off. static bool FLAGS_compression = true; inline static void DBSynchronize(kyotocabinet::TreeDB* db_) { // Synchronize will flush writes to disk if (!db_->synchronize()) { fprintf(stderr, "synchronize error: %s\n", db_->error().name()); } } namespace leveldb { // Helper for quickly generating random data. namespace { class RandomGenerator { private: std::string data_; int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < 1048576) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. 
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece); data_.append(piece); } pos_ = 0; } Slice Generate(int len) { if (pos_ + len > data_.size()) { pos_ = 0; assert(len < data_.size()); } pos_ += len; return Slice(data_.data() + pos_ - len, len); } }; static Slice TrimSpace(Slice s) { int start = 0; while (start < s.size() && isspace(s[start])) { start++; } int limit = s.size(); while (limit > start && isspace(s[limit-1])) { limit--; } return Slice(s.data() + start, limit - start); } } class Benchmark { private: kyotocabinet::TreeDB* db_; int db_num_; int num_; int reads_; double start_; double last_op_finish_; int64_t bytes_; std::string message_; Histogram hist_; RandomGenerator gen_; Random rand_; kyotocabinet::LZOCompressor<kyotocabinet::LZO::RAW> comp_; // State kept for progress messages int done_; int next_report_; // When to report next void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); fprintf(stdout, "Keys: %d bytes each\n", kKeySize); fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", FLAGS_value_size, static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / 1048576.0)); fprintf(stdout, "FileSize: %.1f MB (estimated)\n", (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf(stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" ); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { fprintf(stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n", kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); #if defined(__linux) 
time_t now = time(NULL); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != NULL) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != NULL) { const char* sep = strchr(line, ':'); if (sep == NULL) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } void Start() { start_ = Env::Default()->NowMicros() * 1e-6; bytes_ = 0; message_.clear(); last_op_finish_ = start_; hist_.Clear(); done_ = 0; next_report_ = 100; } void FinishedSingleOp() { if (FLAGS_histogram) { double now = Env::Default()->NowMicros() * 1e-6; double micros = (now - last_op_finish_) * 1e6; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void Stop(const Slice& name) { double finish = Env::Default()->NowMicros() * 1e-6; // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). 
if (done_ < 1) done_ = 1; if (bytes_ > 0) { char rate[100]; snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { message_ = std::string(rate) + " " + message_; } else { message_ = rate; } } fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), message_.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); } fflush(stdout); } public: enum Order { SEQUENTIAL, RANDOM }; enum DBState { FRESH, EXISTING }; Benchmark() : db_(NULL), num_(FLAGS_num), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), bytes_(0), rand_(301) { std::vector<std::string> files; Env::Default()->GetChildren("/tmp", &files); if (!FLAGS_use_existing_db) { for (int i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("dbbench_polyDB")) { Env::Default()->DeleteFile("/tmp/" + files[i]); } } } } ~Benchmark() { if (!db_->close()) { fprintf(stderr, "close error: %s\n", db_->error().name()); } } void Run() { PrintHeader(); Open(false); const char* benchmarks = FLAGS_benchmarks; while (benchmarks != NULL) { const char* sep = strchr(benchmarks, ','); Slice name; if (sep == NULL) { name = benchmarks; benchmarks = NULL; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } Start(); bool known = true; bool write_sync = false; if (name == Slice("fillseq")) { Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1); } else if (name == Slice("fillrandom")) { Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("overwrite")) { Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillrandsync")) { write_sync = true; Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillseqsync")) { write_sync = true; 
Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillrand100K")) { Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1); DBSynchronize(db_); } else if (name == Slice("fillseq100K")) { Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1); DBSynchronize(db_); } else if (name == Slice("readseq")) { ReadSequential(); } else if (name == Slice("readrandom")) { ReadRandom(); } else if (name == Slice("readrand100K")) { int n = reads_; reads_ /= 1000; ReadRandom(); reads_ = n; } else if (name == Slice("readseq100K")) { int n = reads_; reads_ /= 1000; ReadSequential(); reads_ = n; } else { known = false; if (name != Slice()) { // No error message for empty name fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); } } if (known) { Stop(name); } } } private: void Open(bool sync) { assert(db_ == NULL); // Initialize db_ db_ = new kyotocabinet::TreeDB(); char file_name[100]; db_num_++; snprintf(file_name, sizeof(file_name), "/tmp/dbbench_polyDB-%d.kct", db_num_); // Create tuning options and open the database int open_options = kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE; int tune_options = kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR; if (FLAGS_compression) { tune_options |= kyotocabinet::TreeDB::TCOMPRESS; db_->tune_compressor(&comp_); } db_->tune_options(tune_options); db_->tune_page_cache(FLAGS_cache_size); db_->tune_page(FLAGS_page_size); db_->tune_map(256LL<<20); if (sync) { open_options |= kyotocabinet::PolyDB::OAUTOSYNC; } if (!db_->open(file_name, open_options)) { fprintf(stderr, "open error: %s\n", db_->error().name()); } } void Write(bool sync, Order order, DBState state, int num_entries, int value_size, int entries_per_batch) { // Create new database if state == FRESH if (state == FRESH) { if (FLAGS_use_existing_db) { message_ = "skipping (--use_existing_db is true)"; return; } delete db_; db_ = NULL; Open(sync); Start(); // Do 
not count time taken to destroy/open } if (num_entries != num_) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_entries); message_ = msg; } // Write to database for (int i = 0; i < num_entries; i++) { const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries); char key[100]; snprintf(key, sizeof(key), "%016d", k); bytes_ += value_size + strlen(key); std::string cpp_key = key; if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) { fprintf(stderr, "set error: %s\n", db_->error().name()); } FinishedSingleOp(); } } void ReadSequential() { kyotocabinet::DB::Cursor* cur = db_->cursor(); cur->jump(); std::string ckey, cvalue; while (cur->get(&ckey, &cvalue, true)) { bytes_ += ckey.size() + cvalue.size(); FinishedSingleOp(); } delete cur; } void ReadRandom() { std::string value; for (int i = 0; i < reads_; i++) { char key[100]; const int k = rand_.Next() % reads_; snprintf(key, sizeof(key), "%016d", k); db_->get(key, &value); FinishedSingleOp(); } } }; } int main(int argc, char** argv) { for (int i = 1; i < argc; i++) { double d; int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) { FLAGS_compression_ratio = d; } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) { FLAGS_cache_size = n; } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) { FLAGS_page_size = n; } else if (sscanf(argv[i], "--compression=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_compression = (n == 1) ? 
true : false; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } leveldb::Benchmark benchmark; benchmark.Run(); return 0; }
zzxiaogx-leveldb
doc/bench/db_bench_tree_db.cc
C++
bsd
15,180
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <stdio.h> #include <stdlib.h> #include <sqlite3.h> #include "util/histogram.h" #include "util/random.h" #include "util/testutil.h" // Comma-separated list of operations to run in the specified order // Actual benchmarks: // // fillseq -- write N values in sequential key order in async mode // fillseqsync -- write N/100 values in sequential key order in sync mode // fillseqbatch -- batch write N values in sequential key order in async mode // fillrandom -- write N values in random key order in async mode // fillrandsync -- write N/100 values in random key order in sync mode // fillrandbatch -- batch write N values in sequential key order in async mode // overwrite -- overwrite N values in random key order in async mode // fillrand100K -- write N/1000 100K values in random order in async mode // fillseq100K -- write N/1000 100K values in sequential order in async mode // readseq -- read N times sequentially // readrandom -- read N times in random order // readrand100K -- read N/1000 100K values in sequential order in async mode static const char* FLAGS_benchmarks = "fillseq," "fillseqsync," "fillseqbatch," "fillrandom," "fillrandsync," "fillrandbatch," "overwrite," "overwritebatch," "readrandom," "readseq," "fillrand100K," "fillseq100K," "readseq," "readrand100K," ; // Number of key/values to place in database static int FLAGS_num = 1000000; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Size of each value static int FLAGS_value_size = 100; // Print histogram of operation timings static bool FLAGS_histogram = false; // Arrange to generate values that shrink to this fraction of // their original size after compression static double FLAGS_compression_ratio = 0.5; // Page size. Default 1 KB. 
static int FLAGS_page_size = 1024; // Number of pages. // Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB. static int FLAGS_num_pages = 4096; // If true, do not destroy the existing database. If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. static bool FLAGS_use_existing_db = false; // If true, we allow batch writes to occur static bool FLAGS_transaction = true; // If true, we enable Write-Ahead Logging static bool FLAGS_WAL_enabled = true; inline static void ExecErrorCheck(int status, char *err_msg) { if (status != SQLITE_OK) { fprintf(stderr, "SQL error: %s\n", err_msg); sqlite3_free(err_msg); exit(1); } } inline static void StepErrorCheck(int status) { if (status != SQLITE_DONE) { fprintf(stderr, "SQL step error: status = %d\n", status); exit(1); } } inline static void ErrorCheck(int status) { if (status != SQLITE_OK) { fprintf(stderr, "sqlite3 error: status = %d\n", status); exit(1); } } inline static void WalCheckpoint(sqlite3* db_) { // Flush all writes to disk if (FLAGS_WAL_enabled) { sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL); } } namespace leveldb { // Helper for quickly generating random data. namespace { class RandomGenerator { private: std::string data_; int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < 1048576) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. 
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece); data_.append(piece); } pos_ = 0; } Slice Generate(int len) { if (pos_ + len > data_.size()) { pos_ = 0; assert(len < data_.size()); } pos_ += len; return Slice(data_.data() + pos_ - len, len); } }; static Slice TrimSpace(Slice s) { int start = 0; while (start < s.size() && isspace(s[start])) { start++; } int limit = s.size(); while (limit > start && isspace(s[limit-1])) { limit--; } return Slice(s.data() + start, limit - start); } } class Benchmark { private: sqlite3* db_; int db_num_; int num_; int reads_; double start_; double last_op_finish_; int64_t bytes_; std::string message_; Histogram hist_; RandomGenerator gen_; Random rand_; // State kept for progress messages int done_; int next_report_; // When to report next void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); fprintf(stdout, "Keys: %d bytes each\n", kKeySize); fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf(stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" ); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); #if defined(__linux) time_t now = time(NULL); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != NULL) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != NULL) { const char* sep = strchr(line, ':'); if (sep == NULL) { continue; } Slice key = 
TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } void Start() { start_ = Env::Default()->NowMicros() * 1e-6; bytes_ = 0; message_.clear(); last_op_finish_ = start_; hist_.Clear(); done_ = 0; next_report_ = 100; } void FinishedSingleOp() { if (FLAGS_histogram) { double now = Env::Default()->NowMicros() * 1e-6; double micros = (now - last_op_finish_) * 1e6; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void Stop(const Slice& name) { double finish = Env::Default()->NowMicros() * 1e-6; // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; if (bytes_ > 0) { char rate[100]; snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { message_ = std::string(rate) + " " + message_; } else { message_ = rate; } } fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), (finish - start_) * 1e6 / done_, (message_.empty() ? 
"" : " "), message_.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); } fflush(stdout); } public: enum Order { SEQUENTIAL, RANDOM }; enum DBState { FRESH, EXISTING }; Benchmark() : db_(NULL), db_num_(0), num_(FLAGS_num), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), bytes_(0), rand_(301) { std::vector<std::string> files; Env::Default()->GetChildren("/tmp", &files); if (!FLAGS_use_existing_db) { for (int i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("dbbench_sqlite3")) { Env::Default()->DeleteFile("/tmp/" + files[i]); } } } } ~Benchmark() { int status = sqlite3_close(db_); ErrorCheck(status); } void Run() { PrintHeader(); Open(); const char* benchmarks = FLAGS_benchmarks; while (benchmarks != NULL) { const char* sep = strchr(benchmarks, ','); Slice name; if (sep == NULL) { name = benchmarks; benchmarks = NULL; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } bytes_ = 0; Start(); bool known = true; bool write_sync = false; if (name == Slice("fillseq")) { Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillseqbatch")) { Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1000); WalCheckpoint(db_); } else if (name == Slice("fillrandom")) { Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillrandbatch")) { Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1000); WalCheckpoint(db_); } else if (name == Slice("overwrite")) { Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("overwritebatch")) { Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1000); WalCheckpoint(db_); } else if (name == Slice("fillrandsync")) { write_sync = true; Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillseqsync")) { 
write_sync = true; Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillrand100K")) { Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1); WalCheckpoint(db_); } else if (name == Slice("fillseq100K")) { Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1); WalCheckpoint(db_); } else if (name == Slice("readseq")) { ReadSequential(); } else if (name == Slice("readrandom")) { Read(RANDOM, 1); } else if (name == Slice("readrand100K")) { int n = reads_; reads_ /= 1000; Read(RANDOM, 1); reads_ = n; } else { known = false; if (name != Slice()) { // No error message for empty name fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); } } if (known) { Stop(name); } } } void Open() { assert(db_ == NULL); int status; char file_name[100]; char* err_msg = NULL; db_num_++; // Open database snprintf(file_name, sizeof(file_name), "/tmp/dbbench_sqlite3-%d.db", db_num_); status = sqlite3_open(file_name, &db_); if (status) { fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_)); exit(1); } // Change SQLite cache size char cache_size[100]; snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", FLAGS_num_pages); status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg); ExecErrorCheck(status, err_msg); // FLAGS_page_size is defaulted to 1024 if (FLAGS_page_size != 1024) { char page_size[100]; snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", FLAGS_page_size); status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg); ExecErrorCheck(status, err_msg); } // Change journal mode to WAL if WAL enabled flag is on if (FLAGS_WAL_enabled) { std::string WAL_stmt = "PRAGMA journal_mode = WAL"; // LevelDB's default cache size is a combined 4 MB std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096"; status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg); ExecErrorCheck(status, err_msg); status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, 
NULL, &err_msg); ExecErrorCheck(status, err_msg); } // Change locking mode to exclusive and create tables/index for database std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE"; std::string create_stmt = "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))"; std::string stmt_array[] = { locking_stmt, create_stmt }; int stmt_array_length = sizeof(stmt_array) / sizeof(std::string); for (int i = 0; i < stmt_array_length; i++) { status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg); ExecErrorCheck(status, err_msg); } } void Write(bool write_sync, Order order, DBState state, int num_entries, int value_size, int entries_per_batch) { // Create new database if state == FRESH if (state == FRESH) { if (FLAGS_use_existing_db) { message_ = "skipping (--use_existing_db is true)"; return; } sqlite3_close(db_); db_ = NULL; Open(); Start(); } if (num_entries != num_) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_entries); message_ = msg; } char* err_msg = NULL; int status; sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt; std::string replace_str = "REPLACE INTO test (key, value) VALUES (?, ?)"; std::string begin_trans_str = "BEGIN TRANSACTION;"; std::string end_trans_str = "END TRANSACTION;"; // Check for synchronous flag in options std::string sync_stmt = (write_sync) ? 
"PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF"; status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg); ExecErrorCheck(status, err_msg); // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt, NULL); ErrorCheck(status); status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, &begin_trans_stmt, NULL); ErrorCheck(status); status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt, NULL); ErrorCheck(status); bool transaction = (entries_per_batch > 1); for (int i = 0; i < num_entries; i += entries_per_batch) { // Begin write transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(begin_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(begin_trans_stmt); ErrorCheck(status); } // Create and execute SQL statements for (int j = 0; j < entries_per_batch; j++) { const char* value = gen_.Generate(value_size).data(); // Create values for key-value pair const int k = (order == SEQUENTIAL) ? 
i + j : (rand_.Next() % num_entries); char key[100]; snprintf(key, sizeof(key), "%016d", k); // Bind KV values into replace_stmt status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC); ErrorCheck(status); status = sqlite3_bind_blob(replace_stmt, 2, value, value_size, SQLITE_STATIC); ErrorCheck(status); // Execute replace_stmt bytes_ += value_size + strlen(key); status = sqlite3_step(replace_stmt); StepErrorCheck(status); // Reset SQLite statement for another use status = sqlite3_clear_bindings(replace_stmt); ErrorCheck(status); status = sqlite3_reset(replace_stmt); ErrorCheck(status); FinishedSingleOp(); } // End write transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(end_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(end_trans_stmt); ErrorCheck(status); } } status = sqlite3_finalize(replace_stmt); ErrorCheck(status); status = sqlite3_finalize(begin_trans_stmt); ErrorCheck(status); status = sqlite3_finalize(end_trans_stmt); ErrorCheck(status); } void Read(Order order, int entries_per_batch) { int status; sqlite3_stmt *read_stmt, *begin_trans_stmt, *end_trans_stmt; std::string read_str = "SELECT * FROM test WHERE key = ?"; std::string begin_trans_str = "BEGIN TRANSACTION;"; std::string end_trans_str = "END TRANSACTION;"; // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, &begin_trans_stmt, NULL); ErrorCheck(status); status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt, NULL); ErrorCheck(status); status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL); ErrorCheck(status); bool transaction = (entries_per_batch > 1); for (int i = 0; i < reads_; i += entries_per_batch) { // Begin read transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(begin_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(begin_trans_stmt); ErrorCheck(status); } // Create and execute SQL statements for (int j = 0; j < entries_per_batch; 
j++) { // Create key value char key[100]; int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_); snprintf(key, sizeof(key), "%016d", k); // Bind key value into read_stmt status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC); ErrorCheck(status); // Execute read statement while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW); StepErrorCheck(status); // Reset SQLite statement for another use status = sqlite3_clear_bindings(read_stmt); ErrorCheck(status); status = sqlite3_reset(read_stmt); ErrorCheck(status); FinishedSingleOp(); } // End read transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(end_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(end_trans_stmt); ErrorCheck(status); } } status = sqlite3_finalize(read_stmt); ErrorCheck(status); status = sqlite3_finalize(begin_trans_stmt); ErrorCheck(status); status = sqlite3_finalize(end_trans_stmt); ErrorCheck(status); } void ReadSequential() { int status; sqlite3_stmt *pStmt; std::string read_str = "SELECT * FROM test ORDER BY key"; status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL); ErrorCheck(status); for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) { bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2); FinishedSingleOp(); } status = sqlite3_finalize(pStmt); ErrorCheck(status); } }; } int main(int argc, char** argv) { for (int i = 1; i < argc; i++) { double d; int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) { FLAGS_compression_ratio = d; } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_use_existing_db = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if 
(sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (leveldb::Slice(argv[i]) == leveldb::Slice("--no_transaction")) { FLAGS_transaction = false; } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) { FLAGS_page_size = n; } else if (sscanf(argv[i], "--num_pages=%d%c", &n, &junk) == 1) { FLAGS_num_pages = n; } else if (sscanf(argv[i], "--WAL_enabled=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_WAL_enabled = n; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } leveldb::Benchmark benchmark; benchmark.Run(); return 0; }
zzxiaogx-leveldb
doc/bench/db_bench_sqlite3.cc
C++
bsd
21,812
<!DOCTYPE html> <html> <head> <link rel="stylesheet" type="text/css" href="doc.css" /> <title>Leveldb file layout and compactions</title> </head> <body> <h1>Files</h1> The implementation of leveldb is similar in spirit to the representation of a single <a href="http://labs.google.com/papers/bigtable.html"> Bigtable tablet (section 5.3)</a>. However the organization of the files that make up the representation is somewhat different and is explained below. <p> Each database is represented by a set of files stored in a directory. There are several different types of files as documented below: <p> <h2>Log files</h2> <p> A log file (*.log) stores a sequence of recent updates. Each update is appended to the current log file. When the log file reaches a pre-determined size (approximately 4MB by default), it is converted to a sorted table (see below) and a new log file is created for future updates. <p> A copy of the current log file is kept in an in-memory structure (the <code>memtable</code>). This copy is consulted on every read so that read operations reflect all logged updates. <p> <h2>Sorted tables</h2> <p> A sorted table (*.sst) stores a sequence of entries sorted by key. Each entry is either a value for the key, or a deletion marker for the key. (Deletion markers are kept around to hide obsolete values present in older sorted tables). <p> The set of sorted tables are organized into a sequence of levels. The sorted table generated from a log file is placed in a special <code>young</code> level (also called level-0). When the number of young files exceeds a certain threshold (currently four), all of the young files are merged together with all of the overlapping level-1 files to produce a sequence of new level-1 files (we create a new level-1 file for every 2MB of data.) <p> Files in the young level may contain overlapping keys. However files in other levels have distinct non-overlapping key ranges. Consider level number L where L >= 1. 
When the combined size of files in level-L exceeds (10^L) MB (i.e., 10MB for level-1, 100MB for level-2, ...), one file in level-L, and all of the overlapping files in level-(L+1) are merged to form a set of new files for level-(L+1). These merges have the effect of gradually migrating new updates from the young level to the largest level using only bulk reads and writes (i.e., minimizing expensive seeks). <h2>Manifest</h2> <p> A MANIFEST file lists the set of sorted tables that make up each level, the corresponding key ranges, and other important metadata. A new MANIFEST file (with a new number embedded in the file name) is created whenever the database is reopened. The MANIFEST file is formatted as a log, and changes made to the serving state (as files are added or removed) are appended to this log. <p> <h2>Current</h2> <p> CURRENT is a simple text file that contains the name of the latest MANIFEST file. <p> <h2>Info logs</h2> <p> Informational messages are printed to files named LOG and LOG.old. <p> <h2>Others</h2> <p> Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp). <h1>Level 0</h1> When the log file grows above a certain size (1MB by default): <ul> <li>Create a brand new memtable and log file and direct future updates here <li>In the background: <ul> <li>Write the contents of the previous memtable to an sstable <li>Discard the memtable <li>Delete the old log file and the old memtable <li>Add the new sstable to the young (level-0) level. </ul> </ul> <h1>Compactions</h1> <p> When the size of level L exceeds its limit, we compact it in a background thread. The compaction picks a file from level L and all overlapping files from the next level L+1. Note that if a level-L file overlaps only part of a level-(L+1) file, the entire file at level-(L+1) is used as an input to the compaction and will be discarded after the compaction. 
Aside: because level-0 is special (files in it may overlap each other), we treat compactions from level-0 to level-1 specially: a level-0 compaction may pick more than one level-0 file in case some of these files overlap each other. <p> A compaction merges the contents of the picked files to produce a sequence of level-(L+1) files. We switch to producing a new level-(L+1) file after the current output file has reached the target file size (2MB). We also switch to a new output file when the key range of the current output file has grown enough to overlap more than ten level-(L+2) files. This last rule ensures that a later compaction of a level-(L+1) file will not pick up too much data from level-(L+2). <p> The old files are discarded and the new files are added to the serving state. <p> Compactions for a particular level rotate through the key space. In more detail, for each level L, we remember the ending key of the last compaction at level L. The next compaction for level L will pick the first file that starts after this key (wrapping around to the beginning of the key space if there is no such file). <p> Compactions drop overwritten values. They also drop deletion markers if there are no higher numbered levels that contain a file whose range overlaps the current key. <h2>Timing</h2> Level-0 compactions will read up to four 1MB files from level-0, and at worst all the level-1 files (10MB). I.e., we will read 14MB and write 14MB. <p> Other than the special level-0 compactions, we will pick one 2MB file from level L. In the worst case, this will overlap ~ 12 files from level L+1 (10 because level-(L+1) is ten times the size of level-L, and another two at the boundaries since the file ranges at level-L will usually not be aligned with the file ranges at level-L+1). The compaction will therefore read 26MB and write 26MB. Assuming a disk IO rate of 100MB/s (ballpark range for modern drives), the worst compaction cost will be approximately 0.5 second. 
<p> If we throttle the background writing to something small, say 10% of the full 100MB/s speed, a compaction may take up to 5 seconds. If the user is writing at 10MB/s, we might build up lots of level-0 files (~50 to hold the 5*10MB). This may significantly increase the cost of reads due to the overhead of merging more files together on every read. <p> Solution 1: To reduce this problem, we might want to increase the log switching threshold when the number of level-0 files is large. Though the downside is that the larger this threshold, the more memory we will need to hold the corresponding memtable. <p> Solution 2: We might want to decrease write rate artificially when the number of level-0 files goes up. <p> Solution 3: We work on reducing the cost of very wide merges. Perhaps most of the level-0 files will have their blocks sitting uncompressed in the cache and we will only need to worry about the O(N) complexity in the merging iterator. <h2>Number of files</h2> Instead of always making 2MB files, we could make larger files for larger levels to reduce the total file count, though at the expense of more bursty compactions. Alternatively, we could shard the set of files into multiple directories. <p> An experiment on an <code>ext3</code> filesystem on Feb 04, 2011 shows the following timings to do 100K file opens in directories with varying number of files: <table class="datatable"> <tr><th>Files in directory</th><th>Microseconds to open a file</th></tr> <tr><td>1000</td><td>9</td> <tr><td>10000</td><td>10</td> <tr><td>100000</td><td>16</td> </table> So maybe even the sharding is not necessary on modern filesystems? <h1>Recovery</h1> <ul> <li> Read CURRENT to find name of the latest committed MANIFEST <li> Read the named MANIFEST file <li> Clean up stale files <li> We could open all sstables here, but it is probably better to be lazy... 
<li> Convert log chunk to a new level-0 sstable <li> Start directing new writes to a new log file with recovered sequence# </ul> <h1>Garbage collection of files</h1> <code>DeleteObsoleteFiles()</code> is called at the end of every compaction and at the end of recovery. It finds the names of all files in the database. It deletes all log files that are not the current log file. It deletes all table files that are not referenced from some level and are not the output of an active compaction. </body> </html>
zzxiaogx-leveldb
doc/impl.html
HTML
bsd
8,279
<!DOCTYPE html> <html> <head> <title>LevelDB Benchmarks</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <style> body { font-family:Helvetica,sans-serif; padding:20px; } h2 { padding-top:30px; } table.bn { width:800px; border-collapse:collapse; border:0; padding:0; } table.bnbase { width:650px; } table.bn td { padding:2px 0; } table.bn td.c1 { font-weight:bold; width:150px; } table.bn td.c1 div.e { float:right; font-weight:normal; } table.bn td.c2 { width:150px; text-align:right; padding:2px; } table.bn td.c3 { width:350px; } table.bn td.c4 { width:150px; font-size:small; padding-left:4px; } /* chart bars */ div.bldb { background-color:#0255df; } div.bkct { background-color:#df5555; } div.bsql { background-color:#aadf55; } .code { font-family:monospace; font-size:large; } .todo { color: red; } </style> </head> <body> <h1>LevelDB Benchmarks</h1> <p>Google, July 2011</p> <hr> <p>In order to test LevelDB's performance, we benchmark it against other well-established database implementations. We compare LevelDB (revision 39) against <a href="http://www.sqlite.org/">SQLite3</a> (version 3.7.6.3) and <a href="http://fallabs.com/kyotocabinet/spex.html">Kyoto Cabinet's</a> (version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.</p> <p>Benchmarks were all performed on a six-core Intel(R) Xeon(R) CPU X5650 @ 2.67GHz, with 12288 KB of total L3 cache and 12 GB of DDR3 RAM at 1333 MHz. (Note that LevelDB uses at most two CPUs since the benchmarks are single threaded: one to run the benchmark, and one for background compactions.) We ran the benchmarks on two machines (with identical processors), one with an Ext3 file system and one with an Ext4 file system. The machine with the Ext3 file system has a SATA Hitachi HDS721050CLA362 hard drive. 
The machine with the Ext4 file system has a SATA Samsung HD502HJ hard drive. Both hard drives spin at 7200 RPM and have hard drive write-caching enabled (using `hdparm -W 1 [device]`). The numbers reported below are the median of three measurements.</p> <h4>Benchmark Source Code</h4> <p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's <span class="code">db_bench</span>. The code for each of the benchmarks resides here:</p> <ul> <li> <b>LevelDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/trunk/db/db_bench.cc">db/db_bench.cc</a>.</li> <li> <b>SQLite:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_sqlite3.cc">doc/bench/db_bench_sqlite3.cc</a>.</li> <li> <b>Kyoto TreeDB:</b> <a href="http://code.google.com/p/leveldb/source/browse/#svn%2Ftrunk%2Fdoc%2Fbench%2Fdb_bench_tree_db.cc">doc/bench/db_bench_tree_db.cc</a>.</li> </ul> <h4>Custom Build Specifications</h4> <ul> <li>LevelDB: LevelDB was compiled with the <a href="http://code.google.com/p/google-perftools">tcmalloc</a> library and the <a href="http://code.google.com/p/snappy/">Snappy</a> compression library (revision 33). Assertions were disabled.</li> <li>TreeDB: TreeDB was compiled using the <a href="http://www.oberhumer.com/opensource/lzo/">LZO</a> compression library (version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when opening the database in order to reduce the footprint of each record.</li> <li>SQLite: We tuned SQLite's performance, by setting its locking mode to exclusive. We also enabled SQLite's <a href="http://www.sqlite.org/draft/wal.html">write-ahead logging</a>.</li> </ul> <h2>1. Baseline Performance</h2> <p>This section gives the baseline performance of all the databases. Following sections show how performance changes as various parameters are varied. 
For the baseline:</p> <ul> <li> Each database is allowed 4 MB of cache memory.</li> <li> Databases are opened in <em>asynchronous</em> write mode. (LevelDB's sync option, TreeDB's OAUTOSYNC option, and SQLite3's synchronous options are all turned off). I.e., every write is pushed to the operating system, but the benchmark does not wait for the write to reach the disk.</li> <li> Keys are 16 bytes each.</li> <li> Value are 100 bytes each (with enough redundancy so that a simple compressor shrinks them to 50% of their original size).</li> <li> Sequential reads/writes traverse the key space in increasing order.</li> <li> Random reads/writes traverse the key space in random order.</li> </ul> <h3>A. Sequential Reads</h3> <table class="bn bnbase"> <tr><td class="c1">LevelDB</td> <td class="c2">4,030,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">1,010,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:95px">&nbsp;</div></td> <tr><td class="c1">SQLite3</td> <td class="c2">383,000 ops/sec</td> <td class="c3"><div class="bsql" style="width:33px">&nbsp;</div></td> </table> <h3>B. Random Reads</h3> <table class="bn bnbase"> <tr><td class="c1">LevelDB</td> <td class="c2">129,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:298px">&nbsp;</div></td> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">151,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td> <tr><td class="c1">SQLite3</td> <td class="c2">134,000 ops/sec</td> <td class="c3"><div class="bsql" style="width:310px">&nbsp;</div></td> </table> <h3>C. 
Sequential Writes</h3> <table class="bn bnbase"> <tr><td class="c1">LevelDB</td> <td class="c2">779,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">342,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:154px">&nbsp;</div></td> <tr><td class="c1">SQLite3</td> <td class="c2">48,600 ops/sec</td> <td class="c3"><div class="bsql" style="width:22px">&nbsp;</div></td> </table> <h3>D. Random Writes</h3> <table class="bn bnbase"> <tr><td class="c1">LevelDB</td> <td class="c2">164,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">88,500 ops/sec</td> <td class="c3"><div class="bkct" style="width:188px">&nbsp;</div></td> <tr><td class="c1">SQLite3</td> <td class="c2">9,860 ops/sec</td> <td class="c3"><div class="bsql" style="width:21px">&nbsp;</div></td> </table> <p>LevelDB outperforms both SQLite3 and TreeDB in sequential and random write operations and sequential read operations. Kyoto Cabinet has the fastest random read operations.</p> <h2>2. Write Performance under Different Configurations</h2> <h3>A. Large Values </h3> <p>For this benchmark, we start with an empty database, and write 100,000 byte values (~50% compressible). 
To keep the benchmark running time reasonable, we stop after writing 1000 values.</p> <h4>Sequential Writes</h4> <table class="bn bnbase"> <tr><td class="c1">LevelDB</td> <td class="c2">1,100 ops/sec</td> <td class="c3"><div class="bldb" style="width:234px">&nbsp;</div></td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">1,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:224px">&nbsp;</div></td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">1,600 ops/sec</td> <td class="c3"><div class="bsql" style="width:350px">&nbsp;</div></td></tr> </table> <h4>Random Writes</h4> <table class="bn bnbase"> <tr><td class="c1">LevelDB</td> <td class="c2">480 ops/sec</td> <td class="c3"><div class="bldb" style="width:105px">&nbsp;</div></td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">1,100 ops/sec</td> <td class="c3"><div class="bkct" style="width:240px">&nbsp;</div></td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">1,600 ops/sec</td> <td class="c3"><div class="bsql" style="width:350px">&nbsp;</div></td></tr> </table> <p>LevelDB doesn't perform as well with large values of 100,000 bytes each. This is because LevelDB writes keys and values at least twice: first time to the transaction log, and second time (during a compaction) to a sorted file. With larger values, LevelDB's per-operation efficiency is swamped by the cost of extra copies of large values.</p> <h3>B. Batch Writes</h3> <p>A batch write is a set of writes that are applied atomically to the underlying database. A single batch of N writes may be significantly faster than N individual writes. The following benchmark writes one thousand batches where each batch contains one thousand 100-byte values. 
TreeDB does not support batch writes and is omitted from this benchmark.</p> <h4>Sequential Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">840,000 entries/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(1.08x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">124,000 entries/sec</td> <td class="c3"><div class="bsql" style="width:52px">&nbsp;</div></td> <td class="c4">(2.55x baseline)</td></tr> </table> <h4>Random Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">221,000 entries/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(1.35x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">22,000 entries/sec</td> <td class="c3"><div class="bsql" style="width:34px">&nbsp;</div></td> <td class="c4">(2.23x baseline)</td></tr> </table> <p>Because of the way LevelDB persistent storage is organized, batches of random writes are not much slower (only a factor of 4x) than batches of sequential writes.</p> <h3>C. Synchronous Writes</h3> <p>In the following benchmark, we enable the synchronous writing modes of all of the databases. Since this change significantly slows down the benchmark, we stop after 10,000 writes. 
For synchronous write tests, we've disabled hard drive write-caching (using `hdparm -W 0 [device]`).</p> <ul> <li>For LevelDB, we set WriteOptions.sync = true.</li> <li>In TreeDB, we enabled TreeDB's OAUTOSYNC option.</li> <li>For SQLite3, we set "PRAGMA synchronous = FULL".</li> </ul> <h4>Sequential Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">100 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(0.003x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">7 ops/sec</td> <td class="c3"><div class="bkct" style="width:27px">&nbsp;</div></td> <td class="c4">(0.0004x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">88 ops/sec</td> <td class="c3"><div class="bsql" style="width:315px">&nbsp;</div></td> <td class="c4">(0.002x baseline)</td></tr> </table> <h4>Random Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">100 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(0.015x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">8 ops/sec</td> <td class="c3"><div class="bkct" style="width:29px">&nbsp;</div></td> <td class="c4">(0.001x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">88 ops/sec</td> <td class="c3"><div class="bsql" style="width:314px">&nbsp;</div></td> <td class="c4">(0.009x baseline)</td></tr> </table> <p>Also see the <code>ext4</code> performance numbers below since synchronous writes behave significantly differently on <code>ext3</code> and <code>ext4</code>.</p> <h3>D. Turning Compression Off</h3> <p>In the baseline measurements, LevelDB and TreeDB were using light-weight compression (<a href="http://code.google.com/p/snappy/">Snappy</a> for LevelDB, and <a href="http://www.oberhumer.com/opensource/lzo/">LZO</a> for TreeDB). SQLite3, by default does not use compression. 
The experiments below show what happens when compression is disabled in all of the databases (the SQLite3 numbers are just a copy of its baseline measurements):</p> <h4>Sequential Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">594,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(0.76x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">485,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:239px">&nbsp;</div></td> <td class="c4">(1.42x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">48,600 ops/sec</td> <td class="c3"><div class="bsql" style="width:29px">&nbsp;</div></td> <td class="c4">(1.00x baseline)</td></tr> </table> <h4>Random Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">135,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:296px">&nbsp;</div></td> <td class="c4">(0.82x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">159,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td> <td class="c4">(1.80x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">9,860 ops/sec</td> <td class="c3"><div class="bsql" style="width:22px">&nbsp;</div></td> <td class="c4">(1.00x baseline)</td></tr> </table> <p>LevelDB's write performance is better with compression than without since compression decreases the amount of data that has to be written to disk. Therefore LevelDB users can leave compression enabled in most scenarios without having to worry about a tradeoff between space usage and performance. TreeDB's performance on the other hand is better without compression than with compression. Presumably this is because TreeDB's compression library (LZO) is more expensive than LevelDB's compression library (Snappy).</p> <h3>E. Using More Memory</h3> <p>We increased the overall cache size for each database to 128 MB. 
For LevelDB, we partitioned 128 MB into a 120 MB write buffer and 8 MB of cache (up from 2 MB of write buffer and 2 MB of cache). For SQLite3, we kept the page size at 1024 bytes, but increased the number of pages to 131,072 (up from 4096). For TreeDB, we also kept the page size at 1024 bytes, but increased the cache size to 128 MB (up from 4 MB).</p> <h4>Sequential Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">812,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(1.04x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">321,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:138px">&nbsp;</div></td> <td class="c4">(0.94x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">48,500 ops/sec</td> <td class="c3"><div class="bsql" style="width:21px">&nbsp;</div></td> <td class="c4">(1.00x baseline)</td></tr> </table> <h4>Random Writes</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">355,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(2.16x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">284,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:280px">&nbsp;</div></td> <td class="c4">(3.21x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">9,670 ops/sec</td> <td class="c3"><div class="bsql" style="width:10px">&nbsp;</div></td> <td class="c4">(0.98x baseline)</td></tr> </table> <p>SQLite's performance does not change substantially when compared to the baseline, but the random write performance for both LevelDB and TreeDB increases significantly. LevelDB's performance improves because a larger write buffer reduces the need to merge sorted files (since it creates a smaller number of larger sorted files). TreeDB's performance goes up because the entire database is available in memory for fast in-place updates.</p> <h2>3. 
Read Performance under Different Configurations</h2> <h3>A. Larger Caches</h3> <p>We increased the overall memory usage to 128 MB for each database. For LevelDB, we allocated 8 MB to LevelDB's write buffer and 120 MB to LevelDB's cache. The other databases don't differentiate between a write buffer and a cache, so we simply set their cache size to 128 MB.</p> <h4>Sequential Reads</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">5,210,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(1.29x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">1,070,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:72px">&nbsp;</div></td> <td class="c4">(1.06x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">609,000 ops/sec</td> <td class="c3"><div class="bsql" style="width:41px">&nbsp;</div></td> <td class="c4">(1.59x baseline)</td></tr> </table> <h4>Random Reads</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">190,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:144px">&nbsp;</div></td> <td class="c4">(1.47x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">463,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td> <td class="c4">(3.07x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">186,000 ops/sec</td> <td class="c3"><div class="bsql" style="width:141px">&nbsp;</div></td> <td class="c4">(1.39x baseline)</td></tr> </table> <p>As expected, the read performance of all of the databases increases when the caches are enlarged. In particular, TreeDB seems to make very effective use of a cache that is large enough to hold the entire database.</p> <h3>B. No Compression Reads </h3> <p>For this benchmark, we populated a database with 1 million entries consisting of 16 byte keys and 100 byte values. 
We compiled LevelDB and Kyoto Cabinet without compression support, so results that are read out from the database are already uncompressed. We've listed the SQLite3 baseline read performance as a point of comparison.</p> <h4>Sequential Reads</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">4,880,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td> <td class="c4">(1.21x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">1,230,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:88px">&nbsp;</div></td> <td class="c4">(3.60x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">383,000 ops/sec</td> <td class="c3"><div class="bsql" style="width:27px">&nbsp;</div></td> <td class="c4">(1.00x baseline)</td></tr> </table> <h4>Random Reads</h4> <table class="bn"> <tr><td class="c1">LevelDB</td> <td class="c2">149,000 ops/sec</td> <td class="c3"><div class="bldb" style="width:300px">&nbsp;</div></td> <td class="c4">(1.16x baseline)</td></tr> <tr><td class="c1">Kyoto TreeDB</td> <td class="c2">175,000 ops/sec</td> <td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td> <td class="c4">(1.16x baseline)</td></tr> <tr><td class="c1">SQLite3</td> <td class="c2">134,000 ops/sec</td> <td class="c3"><div class="bsql" style="width:268px">&nbsp;</div></td> <td class="c4">(1.00x baseline)</td></tr> </table> <p>Performance of both LevelDB and TreeDB improves a small amount when compression is disabled. Note however that under different workloads, performance may very well be better with compression if it allows more of the working set to fit in memory.</p> <h2>Note about Ext4 Filesystems</h2> <p>The preceding numbers are for an ext3 file system. 
Synchronous writes are much slower under <a href="http://en.wikipedia.org/wiki/Ext4">ext4</a> (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of <span class="code">fsync</span> / <span class="code">msync</span> calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues <span class="code">fsync</span> calls when switching to a new file.</p> <h2>Acknowledgements</h2> <p>Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided help and advice.</p> </body> </html>
zzxiaogx-leveldb
doc/benchmark.html
HTML
bsd
20,970
<!DOCTYPE html> <html> <head> <link rel="stylesheet" type="text/css" href="doc.css" /> <title>Leveldb</title> </head> <body> <h1>Leveldb</h1> <address>Jeff Dean, Sanjay Ghemawat</address> <p> The <code>leveldb</code> library provides a persistent key value store. Keys and values are arbitrary byte arrays. The keys are ordered within the key value store according to a user-specified comparator function. <p> <h1>Opening A Database</h1> <p> A <code>leveldb</code> database has a name which corresponds to a file system directory. All of the contents of database are stored in this directory. The following example shows how to open a database, creating it if necessary: <p> <pre> #include &lt;assert&gt; #include "leveldb/db.h" leveldb::DB* db; leveldb::Options options; options.create_if_missing = true; leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &amp;db); assert(status.ok()); ... </pre> If you want to raise an error if the database already exists, add the following line before the <code>leveldb::DB::Open</code> call: <pre> options.error_if_exists = true; </pre> <h1>Status</h1> <p> You may have noticed the <code>leveldb::Status</code> type above. Values of this type are returned by most functions in <code>leveldb</code> that may encounter an error. You can check if such a result is ok, and also print an associated error message: <p> <pre> leveldb::Status s = ...; if (!s.ok()) cerr &lt;&lt; s.ToString() &lt;&lt; endl; </pre> <h1>Closing A Database</h1> <p> When you are done with a database, just delete the database object. Example: <p> <pre> ... open the db as described above ... ... do something with db ... delete db; </pre> <h1>Reads And Writes</h1> <p> The database provides <code>Put</code>, <code>Delete</code>, and <code>Get</code> methods to modify/query the database. For example, the following code moves the value stored under key1 to key2. 
<pre> std::string value; leveldb::Status s = db-&gt;Get(leveldb::ReadOptions(), key1, &amp;value); if (s.ok()) s = db-&gt;Put(leveldb::WriteOptions(), key2, value); if (s.ok()) s = db-&gt;Delete(leveldb::WriteOptions(), key1); </pre> <h1>Atomic Updates</h1> <p> Note that if the process dies after the Put of key2 but before the delete of key1, the same value may be left stored under multiple keys. Such problems can be avoided by using the <code>WriteBatch</code> class to atomically apply a set of updates: <p> <pre> #include "leveldb/write_batch.h" ... std::string value; leveldb::Status s = db-&gt;Get(leveldb::ReadOptions(), key1, &amp;value); if (s.ok()) { leveldb::WriteBatch batch; batch.Delete(key1); batch.Put(key2, value); s = db-&gt;Write(leveldb::WriteOptions(), &amp;batch); } </pre> The <code>WriteBatch</code> holds a sequence of edits to be made to the database, and these edits within the batch are applied in order. Note that we called <code>Delete</code> before <code>Put</code> so that if <code>key1</code> is identical to <code>key2</code>, we do not end up erroneously dropping the value entirely. <p> Apart from its atomicity benefits, <code>WriteBatch</code> may also be used to speed up bulk updates by placing lots of individual mutations into the same batch. <h1>Synchronous Writes</h1> By default, each write to <code>leveldb</code> is asynchronous: it returns after pushing the write from the process into the operating system. The transfer from operating system memory to the underlying persistent storage happens asynchronously. The <code>sync</code> flag can be turned on for a particular write to make the write operation not return until the data being written has been pushed all the way to persistent storage. (On Posix systems, this is implemented by calling either <code>fsync(...)</code> or <code>fdatasync(...)</code> or <code>msync(..., MS_SYNC)</code> before the write operation returns.) 
<pre> leveldb::WriteOptions write_options; write_options.sync = true; db-&gt;Put(write_options, ...); </pre> Asynchronous writes are often more than a thousand times as fast as synchronous writes. The downside of asynchronous writes is that a crash of the machine may cause the last few updates to be lost. Note that a crash of just the writing process (i.e., not a reboot) will not cause any loss since even when <code>sync</code> is false, an update is pushed from the process memory into the operating system before it is considered done. <p> Asynchronous writes can often be used safely. For example, when loading a large amount of data into the database you can handle lost updates by restarting the bulk load after a crash. A hybrid scheme is also possible where every Nth write is synchronous, and in the event of a crash, the bulk load is restarted just after the last synchronous write finished by the previous run. (The synchronous write can update a marker that describes where to restart on a crash.) <p> <code>WriteBatch</code> provides an alternative to asynchronous writes. Multiple updates may be placed in the same <code>WriteBatch</code> and applied together using a synchronous write (i.e., <code>write_options.sync</code> is set to true). The extra cost of the synchronous write will be amortized across all of the writes in the batch. <p> <h1>Concurrency</h1> <p> A database may only be opened by one process at a time. The <code>leveldb</code> implementation acquires a lock from the operating system to prevent misuse. Within a single process, the same <code>leveldb::DB</code> object may be safely shared by multiple concurrent threads. I.e., different threads may write into or fetch iterators or call <code>Get</code> on the same database without any external synchronization (the leveldb implementation will automatically do the required synchronization). However other objects (like Iterator and WriteBatch) may require external synchronization. 
If two threads share such an object, they must protect access to it using their own locking protocol. More details are available in the public header files. <p> <h1>Iteration</h1> <p> The following example demonstrates how to print all key,value pairs in a database. <p> <pre> leveldb::Iterator* it = db-&gt;NewIterator(leveldb::ReadOptions()); for (it-&gt;SeekToFirst(); it-&gt;Valid(); it-&gt;Next()) { cout &lt;&lt; it-&gt;key().ToString() &lt;&lt; ": " &lt;&lt; it-&gt;value().ToString() &lt;&lt; endl; } assert(it-&gt;status().ok()); // Check for any errors found during the scan delete it; </pre> The following variation shows how to process just the keys in the range <code>[start,limit)</code>: <p> <pre> for (it-&gt;Seek(start); it-&gt;Valid() &amp;&amp; it-&gt;key().ToString() &lt; limit; it-&gt;Next()) { ... } </pre> You can also process entries in reverse order. (Caveat: reverse iteration may be somewhat slower than forward iteration.) <p> <pre> for (it-&gt;SeekToLast(); it-&gt;Valid(); it-&gt;Prev()) { ... } </pre> <h1>Snapshots</h1> <p> Snapshots provide consistent read-only views over the entire state of the key-value store. <code>ReadOptions::snapshot</code> may be non-NULL to indicate that a read should operate on a particular version of the DB state. If <code>ReadOptions::snapshot</code> is NULL, the read will operate on an implicit snapshot of the current state. <p> Snapshots typically are created by the DB::GetSnapshot() method: <p> <pre> leveldb::ReadOptions options; options.snapshot = db-&gt;GetSnapshot(); ... apply some updates to db ... leveldb::Iterator* iter = db-&gt;NewIterator(options); ... read using iter to view the state when the snapshot was created ... delete iter; db-&gt;ReleaseSnapshot(options.snapshot); </pre> Note that when a snapshot is no longer needed, it should be released using the DB::ReleaseSnapshot interface. This allows the implementation to get rid of state that was being maintained just to support reading as of that snapshot. 
<p> A Write operation can also return a snapshot that represents the state of the database just after applying a particular set of updates: <p> <pre> leveldb::Snapshot* snapshot; leveldb::WriteOptions write_options; write_options.post_write_snapshot = &amp;snapshot; leveldb::Status status = db-&gt;Write(write_options, ...); ... perform other mutations to db ... leveldb::ReadOptions read_options; read_options.snapshot = snapshot; leveldb::Iterator* iter = db-&gt;NewIterator(read_options); ... read as of the state just after the Write call returned ... delete iter; db-&gt;ReleaseSnapshot(snapshot); </pre> <h1>Slice</h1> <p> The return value of the <code>it->key()</code> and <code>it->value()</code> calls above are instances of the <code>leveldb::Slice</code> type. <code>Slice</code> is a simple structure that contains a length and a pointer to an external byte array. Returning a <code>Slice</code> is a cheaper alternative to returning a <code>std::string</code> since we do not need to copy potentially large keys and values. In addition, <code>leveldb</code> methods do not return null-terminated C-style strings since <code>leveldb</code> keys and values are allowed to contain '\0' bytes. <p> C++ strings and null-terminated C-style strings can be easily converted to a Slice: <p> <pre> leveldb::Slice s1 = "hello"; std::string str("world"); leveldb::Slice s2 = str; </pre> A Slice can be easily converted back to a C++ string: <pre> std::string str = s1.ToString(); assert(str == std::string("hello")); </pre> Be careful when using Slices since it is up to the caller to ensure that the external byte array into which the Slice points remains live while the Slice is in use. For example, the following is buggy: <p> <pre> leveldb::Slice slice; if (...) { std::string str = ...; slice = str; } Use(slice); </pre> When the <code>if</code> statement goes out of scope, <code>str</code> will be destroyed and the backing storage for <code>slice</code> will disappear. 
<p> <h1>Comparators</h1> <p> The preceding examples used the default ordering function for key, which orders bytes lexicographically. You can however supply a custom comparator when opening a database. For example, suppose each database key consists of two numbers and we should sort by the first number, breaking ties by the second number. First, define a proper subclass of <code>leveldb::Comparator</code> that expresses these rules: <p> <pre> class TwoPartComparator : public leveldb::Comparator { public: // Three-way comparison function: // if a &lt; b: negative result // if a &gt; b: positive result // else: zero result int Compare(const leveldb::Slice&amp; a, const leveldb::Slice&amp; b) const { int a1, a2, b1, b2; ParseKey(a, &amp;a1, &amp;a2); ParseKey(b, &amp;b1, &amp;b2); if (a1 &lt; b1) return -1; if (a1 &gt; b1) return +1; if (a2 &lt; b2) return -1; if (a2 &gt; b2) return +1; return 0; } // Ignore the following methods for now: const char* Name() const { return "TwoPartComparator"; } void FindShortestSeparator(std::string*, const leveldb::Slice&amp;) const { } void FindShortSuccessor(std::string*) const { } }; </pre> Now create a database using this custom comparator: <p> <pre> TwoPartComparator cmp; leveldb::DB* db; leveldb::Options options; options.create_if_missing = true; options.comparator = &amp;cmp; leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &amp;db); ... </pre> <h2>Backwards compatibility</h2> <p> The result of the comparator's <code>Name</code> method is attached to the database when it is created, and is checked on every subsequent database open. If the name changes, the <code>leveldb::DB::Open</code> call will fail. Therefore, change the name if and only if the new key format and comparison function are incompatible with existing databases, and it is ok to discard the contents of all existing databases. <p> You can however still gradually evolve your key format over time with a little bit of pre-planning. 
For example, you could store a version number at the end of each key (one byte should suffice for most uses). When you wish to switch to a new key format (e.g., adding an optional third part to the keys processed by <code>TwoPartComparator</code>), (a) keep the same comparator name (b) increment the version number for new keys (c) change the comparator function so it uses the version numbers found in the keys to decide how to interpret them. <p> <h1>Performance</h1> <p> Performance can be tuned by changing the default values of the types defined in <code>include/leveldb/options.h</code>. <p> <h2>Block size</h2> <p> <code>leveldb</code> groups adjacent keys together into the same block and such a block is the unit of transfer to and from persistent storage. The default block size is approximately 4096 uncompressed bytes. Applications that mostly do bulk scans over the contents of the database may wish to increase this size. Applications that do a lot of point reads of small values may wish to switch to a smaller block size if performance measurements indicate an improvement. There isn't much benefit in using blocks smaller than one kilobyte, or larger than a few megabytes. Also note that compression will be more effective with larger block sizes. <p> <h2>Compression</h2> <p> Each block is individually compressed before being written to persistent storage. Compression is on by default since the default compression method is very fast, and is automatically disabled for uncompressible data. In rare cases, applications may want to disable compression entirely, but should only do so if benchmarks show a performance improvement: <p> <pre> leveldb::Options options; options.compression = leveldb::kNoCompression; ... leveldb::DB::Open(options, name, ...) .... </pre> <h2>Cache</h2> <p> The contents of the database are stored in a set of files in the filesystem and each file stores a sequence of compressed blocks. 
If <code>options.cache</code> is non-NULL, it is used to cache frequently used uncompressed block contents. <p> <pre> #include "leveldb/cache.h" leveldb::Options options; options.cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache leveldb::DB* db; leveldb::DB::Open(options, name, &amp;db); ... use the db ... delete db; delete options.cache; </pre> Note that the cache holds uncompressed data, and therefore it should be sized according to application level data sizes, without any reduction from compression. (Caching of compressed blocks is left to the operating system buffer cache, or any custom <code>Env</code> implementation provided by the client.) <p> When performing a bulk read, the application may wish to disable caching so that the data processed by the bulk read does not end up displacing most of the cached contents. A per-iterator option can be used to achieve this: <p> <pre> leveldb::ReadOptions options; options.fill_cache = false; leveldb::Iterator* it = db-&gt;NewIterator(options); for (it-&gt;SeekToFirst(); it-&gt;Valid(); it-&gt;Next()) { ... } </pre> <h2>Key Layout</h2> <p> Note that the unit of disk transfer and caching is a block. Adjacent keys (according to the database sort order) will usually be placed in the same block. Therefore the application can improve its performance by placing keys that are accessed together near each other and placing infrequently used keys in a separate region of the key space. <p> For example, suppose we are implementing a simple file system on top of <code>leveldb</code>. The types of entries we might wish to store are: <p> <pre> filename -&gt; permission-bits, length, list of file_block_ids file_block_id -&gt; data </pre> We might want to prefix <code>filename</code> keys with one letter (say '/') and the <code>file_block_id</code> keys with a different letter (say '0') so that scans over just the metadata do not force us to fetch and cache bulky file contents.
<p> <h1>Checksums</h1> <p> <code>leveldb</code> associates checksums with all data it stores in the file system. There are two separate controls provided over how aggressively these checksums are verified: <p> <ul> <li> <code>ReadOptions::verify_checksums</code> may be set to true to force checksum verification of all data that is read from the file system on behalf of a particular read. By default, no such verification is done. <p> <li> <code>Options::paranoid_checks</code> may be set to true before opening a database to make the database implementation raise an error as soon as it detects an internal corruption. Depending on which portion of the database has been corrupted, the error may be raised when the database is opened, or later by another database operation. By default, paranoid checking is off so that the database can be used even if parts of its persistent storage have been corrupted. <p> If a database is corrupted (perhaps it cannot be opened when paranoid checking is turned on), the <code>leveldb::RepairDB</code> function may be used to recover as much of the data as possible. <p> </ul> <h1>Approximate Sizes</h1> <p> The <code>GetApproximateSizes</code> method can be used to get the approximate number of bytes of file system space used by one or more key ranges. <p> <pre> leveldb::Range ranges[2]; ranges[0] = leveldb::Range("a", "c"); ranges[1] = leveldb::Range("x", "z"); uint64_t sizes[2]; leveldb::Status s = db-&gt;GetApproximateSizes(ranges, 2, sizes); </pre> The preceding call will set <code>sizes[0]</code> to the approximate number of bytes of file system space used by the key range <code>[a..c)</code> and <code>sizes[1]</code> to the approximate number of bytes used by the key range <code>[x..z)</code>. <p> <h1>Environment</h1> <p> All file operations (and other operating system calls) issued by the <code>leveldb</code> implementation are routed through a <code>leveldb::Env</code> object.
Sophisticated clients may wish to provide their own <code>Env</code> implementation to get better control. For example, an application may introduce artificial delays in the file IO paths to limit the impact of <code>leveldb</code> on other activities in the system. <p> <pre> class SlowEnv : public leveldb::Env { .. implementation of the Env interface ... }; SlowEnv env; leveldb::Options options; options.env = &amp;env; Status s = leveldb::DB::Open(options, ...); </pre> <h1>Porting</h1> <p> <code>leveldb</code> may be ported to a new platform by providing platform specific implementations of the types/methods/functions exported by <code>leveldb/port/port.h</code>. See <code>leveldb/port/port_example.h</code> for more details. <p> In addition, the new platform may need a new default <code>leveldb::Env</code> implementation. See <code>leveldb/util/env_posix.h</code> for an example. <h1>Other Information</h1> <p> Details about the <code>leveldb</code> implementation may be found in the following documents: <ul> <li> <a href="impl.html">Implementation notes</a> <li> <a href="table_format.txt">Format of an immutable Table file</a> <li> <a href="log_format.txt">Format of a log file</a> </ul> </body> </html>
zzxiaogx-leveldb
doc/index.html
HTML
bsd
19,272
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // WriteBatch holds a collection of updates to apply atomically to a DB. // // The updates are applied in the order in which they are added // to the WriteBatch. For example, the value of "key" will be "v3" // after the following batch is written: // // batch.Put("key", "v1"); // batch.Delete("key"); // batch.Put("key", "v2"); // batch.Put("key", "v3"); // // Multiple threads can invoke const methods on a WriteBatch without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same WriteBatch must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ #include <string> #include "leveldb/status.h" namespace leveldb { class Slice; class WriteBatch { public: WriteBatch(); ~WriteBatch(); // Store the mapping "key->value" in the database. void Put(const Slice& key, const Slice& value); // If the database contains a mapping for "key", erase it. Else do nothing. void Delete(const Slice& key); // Clear all updates buffered in this batch. void Clear(); // Support for iterating over the contents of a batch. class Handler { public: virtual ~Handler(); virtual void Put(const Slice& key, const Slice& value) = 0; virtual void Delete(const Slice& key) = 0; }; Status Iterate(Handler* handler) const; private: friend class WriteBatchInternal; std::string rep_; // See comment in write_batch.cc for the format of rep_ // Intentionally copyable }; } #endif // STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
zzxiaogx-leveldb
include/leveldb/write_batch.h
C++
bsd
1,826
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_INCLUDE_DB_H_
#define STORAGE_LEVELDB_INCLUDE_DB_H_

#include <stdint.h>
#include <stdio.h>
#include "leveldb/iterator.h"
#include "leveldb/options.h"

namespace leveldb {

// Library version number.
static const int kMajorVersion = 1;
static const int kMinorVersion = 2;

struct Options;
struct ReadOptions;
struct WriteOptions;

class WriteBatch;

// Abstract handle to particular state of a DB.
// A Snapshot is an immutable object and can therefore be safely
// accessed from multiple threads without any external synchronization.
class Snapshot {
 protected:
  virtual ~Snapshot();
};

// A range of keys
struct Range {
  Slice start;          // Included in the range
  Slice limit;          // Not included in the range

  Range() { }
  Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
};

// A DB is a persistent ordered map from keys to values.
// A DB is safe for concurrent access from multiple threads without
// any external synchronization.
class DB {
 public:
  // Open the database with the specified "name".
  // Stores a pointer to a heap-allocated database in *dbptr and returns
  // OK on success.
  // Stores NULL in *dbptr and returns a non-OK status on error.
  // Caller should delete *dbptr when it is no longer needed.
  static Status Open(const Options& options,
                     const std::string& name,
                     DB** dbptr);

  DB() { }
  virtual ~DB();

  // Set the database entry for "key" to "value".  Returns OK on success,
  // and a non-OK status on error.
  // Note: consider setting options.sync = true.
  virtual Status Put(const WriteOptions& options,
                     const Slice& key,
                     const Slice& value) = 0;

  // Remove the database entry (if any) for "key".  Returns OK on
  // success, and a non-OK status on error.  It is not an error if "key"
  // did not exist in the database.
  // Note: consider setting options.sync = true.
  virtual Status Delete(const WriteOptions& options, const Slice& key) = 0;

  // Apply the specified updates to the database.
  // Returns OK on success, non-OK on failure.
  // Note: consider setting options.sync = true.
  virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0;

  // If the database contains an entry for "key" store the
  // corresponding value in *value and return OK.
  //
  // If there is no entry for "key" leave *value unchanged and return
  // a status for which Status::IsNotFound() returns true.
  //
  // May return some other Status on an error.
  virtual Status Get(const ReadOptions& options,
                     const Slice& key,
                     std::string* value) = 0;

  // Return a heap-allocated iterator over the contents of the database.
  // The result of NewIterator() is initially invalid (caller must
  // call one of the Seek methods on the iterator before using it).
  //
  // Caller should delete the iterator when it is no longer needed.
  // The returned iterator should be deleted before this db is deleted.
  virtual Iterator* NewIterator(const ReadOptions& options) = 0;

  // Return a handle to the current DB state.  Iterators created with
  // this handle will all observe a stable snapshot of the current DB
  // state.  The caller must call ReleaseSnapshot(result) when the
  // snapshot is no longer needed.
  virtual const Snapshot* GetSnapshot() = 0;

  // Release a previously acquired snapshot.  The caller must not
  // use "snapshot" after this call.
  virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0;

  // DB implementations can export properties about their state
  // via this method.  If "property" is a valid property understood by this
  // DB implementation, fills "*value" with its current value and returns
  // true.  Otherwise returns false.
  //
  // Valid property names include:
  //
  //  "leveldb.num-files-at-level<N>" - return the number of files at level <N>,
  //     where <N> is an ASCII representation of a level number (e.g. "0").
  //  "leveldb.stats" - returns a multi-line string that describes statistics
  //     about the internal operation of the DB.
  virtual bool GetProperty(const Slice& property, std::string* value) = 0;

  // For each i in [0,n-1], store in "sizes[i]", the approximate
  // file system space used by keys in "[range[i].start .. range[i].limit)".
  //
  // Note that the returned sizes measure file system space usage, so
  // if the user data compresses by a factor of ten, the returned
  // sizes will be one-tenth the size of the corresponding user data size.
  //
  // The results may not include the sizes of recently written data.
  virtual void GetApproximateSizes(const Range* range, int n,
                                   uint64_t* sizes) = 0;

  // Possible extensions:
  // (1) Add a method to compact a range of keys

 private:
  // No copying allowed
  DB(const DB&);
  void operator=(const DB&);
};

// Destroy the contents of the specified database.
// Be very careful using this method.
Status DestroyDB(const std::string& name, const Options& options);

// If a DB cannot be opened, you may attempt to call this method to
// resurrect as much of the contents of the database as possible.
// Some data may be lost, so be careful when calling this function
// on a database that contains important information.
Status RepairDB(const std::string& dbname, const Options& options);

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_INCLUDE_DB_H_
zzxiaogx-leveldb
include/leveldb/db.h
C++
bsd
5,627
/* Copyright (c) 2011 The LevelDB Authors. All rights reserved.
   Use of this source code is governed by a BSD-style license that can be
   found in the LICENSE file. See the AUTHORS file for names of contributors.

  C bindings for leveldb.  May be useful as a stable ABI that can be
  used by programs that keep leveldb in a shared library, or for
  a JNI api.

  Does not support:
  . getters for the option types
  . custom comparators that implement key shortening
  . capturing post-write-snapshot
  . custom iter, db, env, cache implementations using just the C bindings

  Some conventions:

  (1) We expose just opaque struct pointers and functions to clients.
  This allows us to change internal representations without having to
  recompile clients.

  (2) For simplicity, there is no equivalent to the Slice type.  Instead,
  the caller has to pass the pointer and length as separate
  arguments.

  (3) Errors are represented by a null-terminated c string.  NULL
  means no error.  All operations that can raise an error are passed
  a "char** errptr" as the last argument.  One of the following must
  be true on entry:
     *errptr == NULL
     *errptr points to a malloc()ed null-terminated error message
  On success, a leveldb routine leaves *errptr unchanged.
  On failure, leveldb frees the old value of *errptr and
  set *errptr to a malloc()ed error message.

  (4) Bools have the type unsigned char (0 == false; rest == true)

  (5) All of the pointer arguments must be non-NULL.
*/

#ifndef STORAGE_LEVELDB_INCLUDE_C_H_
#define STORAGE_LEVELDB_INCLUDE_C_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>

/* Exported types: all opaque handles to C++ objects (see convention (1)). */

typedef struct leveldb_t               leveldb_t;
typedef struct leveldb_cache_t         leveldb_cache_t;
typedef struct leveldb_comparator_t    leveldb_comparator_t;
typedef struct leveldb_env_t           leveldb_env_t;
typedef struct leveldb_filelock_t      leveldb_filelock_t;
typedef struct leveldb_iterator_t      leveldb_iterator_t;
typedef struct leveldb_logger_t        leveldb_logger_t;
typedef struct leveldb_options_t       leveldb_options_t;
typedef struct leveldb_randomfile_t    leveldb_randomfile_t;
typedef struct leveldb_readoptions_t   leveldb_readoptions_t;
typedef struct leveldb_seqfile_t       leveldb_seqfile_t;
typedef struct leveldb_snapshot_t      leveldb_snapshot_t;
typedef struct leveldb_writablefile_t  leveldb_writablefile_t;
typedef struct leveldb_writebatch_t    leveldb_writebatch_t;
typedef struct leveldb_writeoptions_t  leveldb_writeoptions_t;

/* DB operations */

extern leveldb_t* leveldb_open(
    const leveldb_options_t* options,
    const char* name,
    char** errptr);

extern void leveldb_close(leveldb_t* db);

extern void leveldb_put(
    leveldb_t* db,
    const leveldb_writeoptions_t* options,
    const char* key, size_t keylen,
    const char* val, size_t vallen,
    char** errptr);

extern void leveldb_delete(
    leveldb_t* db,
    const leveldb_writeoptions_t* options,
    const char* key, size_t keylen,
    char** errptr);

extern void leveldb_write(
    leveldb_t* db,
    const leveldb_writeoptions_t* options,
    leveldb_writebatch_t* batch,
    char** errptr);

/* Returns NULL if not found.  A malloc()ed array otherwise.
   Stores the length of the array in *vallen. */
extern char* leveldb_get(
    leveldb_t* db,
    const leveldb_readoptions_t* options,
    const char* key, size_t keylen,
    size_t* vallen,
    char** errptr);

extern leveldb_iterator_t* leveldb_create_iterator(
    leveldb_t* db,
    const leveldb_readoptions_t* options);

extern const leveldb_snapshot_t* leveldb_create_snapshot(
    leveldb_t* db);

extern void leveldb_release_snapshot(
    leveldb_t* db,
    const leveldb_snapshot_t* snapshot);

/* Returns NULL if property name is unknown.
   Else returns a pointer to a malloc()-ed null-terminated value. */
extern char* leveldb_property_value(
    leveldb_t* db,
    const char* propname);

/* The i-th range is [range_start_key[i], range_limit_key[i]);
   the approximate on-disk size of each range is stored in sizes[i]. */
extern void leveldb_approximate_sizes(
    leveldb_t* db,
    int num_ranges,
    const char* const* range_start_key, const size_t* range_start_key_len,
    const char* const* range_limit_key, const size_t* range_limit_key_len,
    uint64_t* sizes);

/* Management operations */

extern void leveldb_destroy_db(
    const leveldb_options_t* options,
    const char* name,
    char** errptr);

extern void leveldb_repair_db(
    const leveldb_options_t* options,
    const char* name,
    char** errptr);

/* Iterator */

extern void leveldb_iter_destroy(leveldb_iterator_t*);
extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
extern void leveldb_iter_seek_to_first(leveldb_iterator_t*);
extern void leveldb_iter_seek_to_last(leveldb_iterator_t*);
extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen);
extern void leveldb_iter_next(leveldb_iterator_t*);
extern void leveldb_iter_prev(leveldb_iterator_t*);
extern const char* leveldb_iter_key(const leveldb_iterator_t*, size_t* klen);
extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen);
extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr);

/* Write batch */

extern leveldb_writebatch_t* leveldb_writebatch_create();
extern void leveldb_writebatch_destroy(leveldb_writebatch_t*);
extern void leveldb_writebatch_clear(leveldb_writebatch_t*);
extern void leveldb_writebatch_put(
    leveldb_writebatch_t*,
    const char* key, size_t klen,
    const char* val, size_t vlen);
extern void leveldb_writebatch_delete(
    leveldb_writebatch_t*,
    const char* key, size_t klen);
/* Replays each buffered operation against the supplied callbacks,
   passing "state" through unchanged. */
extern void leveldb_writebatch_iterate(
    leveldb_writebatch_t*,
    void* state,
    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
    void (*deleted)(void*, const char* k, size_t klen));

/* Options */

extern leveldb_options_t* leveldb_options_create();
extern void leveldb_options_destroy(leveldb_options_t*);
extern void leveldb_options_set_comparator(
    leveldb_options_t*,
    leveldb_comparator_t*);
extern void leveldb_options_set_create_if_missing(
    leveldb_options_t*, unsigned char);
extern void leveldb_options_set_error_if_exists(
    leveldb_options_t*, unsigned char);
extern void leveldb_options_set_paranoid_checks(
    leveldb_options_t*, unsigned char);
extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*);
extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t);
extern void leveldb_options_set_max_open_files(leveldb_options_t*, int);
extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*);
extern void leveldb_options_set_block_size(leveldb_options_t*, size_t);
extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int);

enum {
  leveldb_no_compression = 0,
  leveldb_snappy_compression = 1
};
extern void leveldb_options_set_compression(leveldb_options_t*, int);

/* Comparator */

extern leveldb_comparator_t* leveldb_comparator_create(
    void* state,
    void (*destructor)(void*),
    int (*compare)(
        void*,
        const char* a, size_t alen,
        const char* b, size_t blen),
    const char* (*name)(void*));
extern void leveldb_comparator_destroy(leveldb_comparator_t*);

/* Read options */

extern leveldb_readoptions_t* leveldb_readoptions_create();
extern void leveldb_readoptions_destroy(leveldb_readoptions_t*);
extern void leveldb_readoptions_set_verify_checksums(
    leveldb_readoptions_t*,
    unsigned char);
extern void leveldb_readoptions_set_fill_cache(
    leveldb_readoptions_t*, unsigned char);
extern void leveldb_readoptions_set_snapshot(
    leveldb_readoptions_t*,
    const leveldb_snapshot_t*);

/* Write options */

extern leveldb_writeoptions_t* leveldb_writeoptions_create();
extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
extern void leveldb_writeoptions_set_sync(
    leveldb_writeoptions_t*, unsigned char);

/* Cache */

extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
extern void leveldb_cache_destroy(leveldb_cache_t* cache);

/* Env */

extern leveldb_env_t* leveldb_create_default_env();
extern void leveldb_env_destroy(leveldb_env_t*);

#ifdef __cplusplus
}  /* end extern "C" */
#endif

#endif  /* STORAGE_LEVELDB_INCLUDE_C_H_ */
zzxiaogx-leveldb
include/leveldb/c.h
C
bsd
8,369
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Slice is a simple structure containing a pointer into some external
// storage and a size.  The user of a Slice must ensure that the slice
// is not used after the corresponding external storage has been
// deallocated.
//
// Multiple threads can invoke const methods on a Slice without
// external synchronization, but if any of the threads may call a
// non-const method, all threads accessing the same Slice must use
// external synchronization.

#ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_
#define STORAGE_LEVELDB_INCLUDE_SLICE_H_

#include <assert.h>
#include <stddef.h>
#include <string.h>
#include <string>

namespace leveldb {

class Slice {
 public:
  // Create an empty slice.
  Slice() : data_(""), size_(0) { }

  // Create a slice that refers to d[0,n-1].
  Slice(const char* d, size_t n) : data_(d), size_(n) { }

  // Create a slice that refers to the contents of "s"
  Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }

  // Create a slice that refers to s[0,strlen(s)-1]
  Slice(const char* s) : data_(s), size_(strlen(s)) { }

  // Return a pointer to the beginning of the referenced data
  const char* data() const { return data_; }

  // Return the length (in bytes) of the referenced data
  size_t size() const { return size_; }

  // Return true iff the length of the referenced data is zero
  bool empty() const { return size_ == 0; }

  // Return the ith byte in the referenced data.
  // REQUIRES: n < size()
  char operator[](size_t n) const {
    assert(n < size());
    return data_[n];
  }

  // Change this slice to refer to an empty array
  void clear() { data_ = ""; size_ = 0; }

  // Drop the first "n" bytes from this slice.
  // REQUIRES: n <= size()
  void remove_prefix(size_t n) {
    assert(n <= size());
    data_ += n;
    size_ -= n;
  }

  // Return a string that contains the copy of the referenced data.
  std::string ToString() const { return std::string(data_, size_); }

  // Three-way comparison.  Returns value:
  //   <  0 iff "*this" <  "b",
  //   == 0 iff "*this" == "b",
  //   >  0 iff "*this" >  "b"
  int compare(const Slice& b) const;

  // Return true iff "x" is a prefix of "*this"
  bool starts_with(const Slice& x) const {
    return ((size_ >= x.size_) &&
            (memcmp(data_, x.data_, x.size_) == 0));
  }

 private:
  const char* data_;  // Not owned; see class comment about lifetime.
  size_t size_;

  // Intentionally copyable
};

inline bool operator==(const Slice& x, const Slice& y) {
  return ((x.size() == y.size()) &&
          (memcmp(x.data(), y.data(), x.size()) == 0));
}

inline bool operator!=(const Slice& x, const Slice& y) {
  return !(x == y);
}

inline int Slice::compare(const Slice& b) const {
  // Keep the common-prefix length in size_t: both size_ fields are
  // size_t, and narrowing it to int would truncate (and pass a wrong
  // length to memcmp) for slices larger than INT_MAX bytes.
  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
  int r = memcmp(data_, b.data_, min_len);
  if (r == 0) {
    // Equal prefix: the shorter slice orders first.
    if (size_ < b.size_) r = -1;
    else if (size_ > b.size_) r = +1;
  }
  return r;
}

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_INCLUDE_SLICE_H_
zzxiaogx-leveldb
include/leveldb/slice.h
C++
bsd
3,106
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_ #define STORAGE_LEVELDB_INCLUDE_TABLE_H_ #include <stdint.h> #include "leveldb/iterator.h" namespace leveldb { class Block; class BlockHandle; struct Options; class RandomAccessFile; struct ReadOptions; // A Table is a sorted map from strings to strings. Tables are // immutable and persistent. A Table may be safely accessed from // multiple threads without external synchronization. class Table { public: // Attempt to open the table that is stored in bytes [0..file_size) // of "file", and read the metadata entries necessary to allow // retrieving data from the table. // // If successful, returns ok and sets "*table" to the newly opened // table. The client should delete "*table" when no longer needed. // If there was an error while initializing the table, sets "*table" // to NULL and returns a non-ok status. Does not take ownership of // "*source", but the client must ensure that "source" remains live // for the duration of the returned table's lifetime. // // *file must remain live while this Table is in use. static Status Open(const Options& options, RandomAccessFile* file, uint64_t file_size, Table** table); ~Table(); // Returns a new iterator over the table contents. // The result of NewIterator() is initially invalid (caller must // call one of the Seek methods on the iterator before using it). Iterator* NewIterator(const ReadOptions&) const; // Given a key, return an approximate byte offset in the file where // the data for that key begins (or would begin if the key were // present in the file). The returned value is in terms of file // bytes, and so includes effects like compression of the underlying data. // E.g., the approximate offset of the last key in the table will // be close to the file length. 
uint64_t ApproximateOffsetOf(const Slice& key) const; private: struct Rep; Rep* rep_; explicit Table(Rep* rep) { rep_ = rep; } static Iterator* BlockReader(void*, const ReadOptions&, const Slice&); // No copying allowed Table(const Table&); void operator=(const Table&); }; } #endif // STORAGE_LEVELDB_INCLUDE_TABLE_H_
zzxiaogx-leveldb
include/leveldb/table.h
C++
bsd
2,462
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ #include <stddef.h> namespace leveldb { class Cache; class Comparator; class Env; class Logger; class Snapshot; // DB contents are stored in a set of blocks, each of which holds a // sequence of key,value pairs. Each block may be compressed before // being stored in a file. The following enum describes which // compression method (if any) is used to compress a block. enum CompressionType { // NOTE: do not change the values of existing entries, as these are // part of the persistent format on disk. kNoCompression = 0x0, kSnappyCompression = 0x1 }; // Options to control the behavior of a database (passed to DB::Open) struct Options { // ------------------- // Parameters that affect behavior // Comparator used to define the order of keys in the table. // Default: a comparator that uses lexicographic byte-wise ordering // // REQUIRES: The client must ensure that the comparator supplied // here has the same name and orders keys *exactly* the same as the // comparator provided to previous open calls on the same DB. const Comparator* comparator; // If true, the database will be created if it is missing. // Default: false bool create_if_missing; // If true, an error is raised if the database already exists. // Default: false bool error_if_exists; // If true, the implementation will do aggressive checking of the // data it is processing and will stop early if it detects any // errors. This may have unforeseen ramifications: for example, a // corruption of one DB entry may cause a large number of entries to // become unreadable or for the entire DB to become unopenable. // Default: false bool paranoid_checks; // Use the specified object to interact with the environment, // e.g. 
to read/write files, schedule background work, etc. // Default: Env::Default() Env* env; // Any internal progress/error information generated by the db will // be written to info_log if it is non-NULL, or to a file stored // in the same directory as the DB contents if info_log is NULL. // Default: NULL Logger* info_log; // ------------------- // Parameters that affect performance // Amount of data to build up in memory (backed by an unsorted log // on disk) before converting to a sorted on-disk file. // // Larger values increase performance, especially during bulk loads. // Up to two write buffers may be held in memory at the same time, // so you may wish to adjust this parameter to control memory usage. // Also, a larger write buffer will result in a longer recovery time // the next time the database is opened. // // Default: 4MB size_t write_buffer_size; // Number of open files that can be used by the DB. You may need to // increase this if your database has a large working set (budget // one open file per 2MB of working set). // // Default: 1000 int max_open_files; // Control over blocks (user data is stored in a set of blocks, and // a block is the unit of reading from disk). // If non-NULL, use the specified cache for blocks. // If NULL, leveldb will automatically create and use an 8MB internal cache. // Default: NULL Cache* block_cache; // Approximate size of user data packed per block. Note that the // block size specified here corresponds to uncompressed data. The // actual size of the unit read from disk may be smaller if // compression is enabled. This parameter can be changed dynamically. // // Default: 4K size_t block_size; // Number of keys between restart points for delta encoding of keys. // This parameter can be changed dynamically. Most clients should // leave this parameter alone. // // Default: 16 int block_restart_interval; // Compress blocks using the specified compression algorithm. This // parameter can be changed dynamically. 
// // Default: kSnappyCompression, which gives lightweight but fast // compression. // // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz: // ~200-500MB/s compression // ~400-800MB/s decompression // Note that these speeds are significantly faster than most // persistent storage speeds, and therefore it is typically never // worth switching to kNoCompression. Even if the input data is // incompressible, the kSnappyCompression implementation will // efficiently detect that and will switch to uncompressed mode. CompressionType compression; // Create an Options object with default values for all fields. Options(); }; // Options that control read operations struct ReadOptions { // If true, all data read from underlying storage will be // verified against corresponding checksums. // Default: false bool verify_checksums; // Should the data read for this iteration be cached in memory? // Callers may wish to set this field to false for bulk scans. // Default: true bool fill_cache; // If "snapshot" is non-NULL, read as of the supplied snapshot // (which must belong to the DB that is being read and which must // not have been released). If "snapshot" is NULL, use an implicit // snapshot of the state at the beginning of this read operation. // Default: NULL const Snapshot* snapshot; ReadOptions() : verify_checksums(false), fill_cache(true), snapshot(NULL) { } }; // Options that control write operations struct WriteOptions { // If true, the write will be flushed from the operating system // buffer cache (by calling WritableFile::Sync()) before the write // is considered complete. If this flag is true, writes will be // slower. // // If this flag is false, and the machine crashes, some recent // writes may be lost. Note that if it is just the process that // crashes (i.e., the machine does not reboot), no writes will be // lost even if sync==false. 
// // In other words, a DB write with sync==false has similar // crash semantics as the "write()" system call. A DB write // with sync==true has similar crash semantics to a "write()" // system call followed by "fsync()". // // Default: false bool sync; // If "post_write_snapshot" is non-NULL, and the write succeeds, // *post_write_snapshot will be modified to point to a snapshot of // the DB state immediately after this write. The caller must call // DB::ReleaseSnapshot(*post_write_snapshot) when the // snapshot is no longer needed. // // If "post_write_snapshot" is non-NULL, and the write fails, // *post_write_snapshot will be set to NULL. // // Default: NULL const Snapshot** post_write_snapshot; WriteOptions() : sync(false), post_write_snapshot(NULL) { } }; } #endif  // STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
zzxiaogx-leveldb
include/leveldb/options.h
C++
bsd
7,063
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ #define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ #include <string> namespace leveldb { class Slice; // A Comparator object provides a total order across slices that are // used as keys in an sstable or a database. A Comparator implementation // must be thread-safe since leveldb may invoke its methods concurrently // from multiple threads. class Comparator { public: virtual ~Comparator(); // Three-way comparison. Returns value: // < 0 iff "a" < "b", // == 0 iff "a" == "b", // > 0 iff "a" > "b" virtual int Compare(const Slice& a, const Slice& b) const = 0; // The name of the comparator. Used to check for comparator // mismatches (i.e., a DB created with one comparator is // accessed using a different comparator. // // The client of this package should switch to a new name whenever // the comparator implementation changes in a way that will cause // the relative ordering of any two keys to change. // // Names starting with "leveldb." are reserved and should not be used // by any clients of this package. virtual const char* Name() const = 0; // Advanced functions: these are used to reduce the space requirements // for internal data structures like index blocks. // If *start < limit, changes *start to a short string in [start,limit). // Simple comparator implementations may return with *start unchanged, // i.e., an implementation of this method that does nothing is correct. virtual void FindShortestSeparator( std::string* start, const Slice& limit) const = 0; // Changes *key to a short string >= *key. // Simple comparator implementations may return with *key unchanged, // i.e., an implementation of this method that does nothing is correct. 
virtual void FindShortSuccessor(std::string* key) const = 0; }; // Return a builtin comparator that uses lexicographic byte-wise // ordering. The result remains the property of this module and // must not be deleted. extern const Comparator* BytewiseComparator(); } #endif // STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_
zzxiaogx-leveldb
include/leveldb/comparator.h
C++
bsd
2,327
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // An iterator yields a sequence of key/value pairs from a source. // The following class defines the interface. Multiple implementations // are provided by this library. In particular, iterators are provided // to access the contents of a Table or a DB. // // Multiple threads can invoke const methods on an Iterator without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Iterator must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ #define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ #include "leveldb/slice.h" #include "leveldb/status.h" namespace leveldb { class Iterator { public: Iterator(); virtual ~Iterator(); // An iterator is either positioned at a key/value pair, or // not valid. This method returns true iff the iterator is valid. virtual bool Valid() const = 0; // Position at the first key in the source. The iterator is Valid() // after this call iff the source is not empty. virtual void SeekToFirst() = 0; // Position at the last key in the source. The iterator is // Valid() after this call iff the source is not empty. virtual void SeekToLast() = 0; // Position at the first key in the source that at or past target // The iterator is Valid() after this call iff the source contains // an entry that comes at or past target. virtual void Seek(const Slice& target) = 0; // Moves to the next entry in the source. After this call, Valid() is // true iff the iterator was not positioned at the last entry in the source. // REQUIRES: Valid() virtual void Next() = 0; // Moves to the previous entry in the source. After this call, Valid() is // true iff the iterator was not positioned at the first entry in source. 
// REQUIRES: Valid() virtual void Prev() = 0; // Return the key for the current entry. The underlying storage for // the returned slice is valid only until the next modification of // the iterator. // REQUIRES: Valid() virtual Slice key() const = 0; // Return the value for the current entry. The underlying storage for // the returned slice is valid only until the next modification of // the iterator. // REQUIRES: !AtEnd() && !AtStart() virtual Slice value() const = 0; // If an error has occurred, return it. Else return an ok status. virtual Status status() const = 0; // Clients are allowed to register function/arg1/arg2 triples that // will be invoked when this iterator is destroyed. // // Note that unlike all of the preceding methods, this method is // not abstract and therefore clients should not override it. typedef void (*CleanupFunction)(void* arg1, void* arg2); void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); private: struct Cleanup { CleanupFunction function; void* arg1; void* arg2; Cleanup* next; }; Cleanup cleanup_; // No copying allowed Iterator(const Iterator&); void operator=(const Iterator&); }; // Return an empty iterator (yields nothing). extern Iterator* NewEmptyIterator(); // Return an empty iterator with the specified status. extern Iterator* NewErrorIterator(const Status& status); } #endif // STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
zzxiaogx-leveldb
include/leveldb/iterator.h
C++
bsd
3,480
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // An Env is an interface used by the leveldb implementation to access // operating system functionality like the filesystem etc. Callers // may wish to provide a custom Env object when opening a database to // get fine gain control; e.g., to rate limit file system operations. // // All Env implementations are safe for concurrent access from // multiple threads without any external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ #include <cstdarg> #include <string> #include <vector> #include <stdint.h> #include "leveldb/status.h" namespace leveldb { class FileLock; class Logger; class RandomAccessFile; class SequentialFile; class Slice; class WritableFile; class Env { public: Env() { } virtual ~Env(); // Return a default environment suitable for the current operating // system. Sophisticated users may wish to provide their own Env // implementation instead of relying on this default environment. // // The result of Default() belongs to leveldb and must never be deleted. static Env* Default(); // Create a brand new sequentially-readable file with the specified name. // On success, stores a pointer to the new file in *result and returns OK. // On failure stores NULL in *result and returns non-OK. If the file does // not exist, returns a non-OK status. // // The returned file will only be accessed by one thread at a time. virtual Status NewSequentialFile(const std::string& fname, SequentialFile** result) = 0; // Create a brand new random access read-only file with the // specified name. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. If the file does not exist, returns a non-OK // status. 
// // The returned file may be concurrently accessed by multiple threads. virtual Status NewRandomAccessFile(const std::string& fname, RandomAccessFile** result) = 0; // Create an object that writes to a new file with the specified // name. Deletes any existing file with the same name and creates a // new file. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. // // The returned file will only be accessed by one thread at a time. virtual Status NewWritableFile(const std::string& fname, WritableFile** result) = 0; // Returns true iff the named file exists. virtual bool FileExists(const std::string& fname) = 0; // Store in *result the names of the children of the specified directory. // The names are relative to "dir". // Original contents of *results are dropped. virtual Status GetChildren(const std::string& dir, std::vector<std::string>* result) = 0; // Delete the named file. virtual Status DeleteFile(const std::string& fname) = 0; // Create the specified directory. virtual Status CreateDir(const std::string& dirname) = 0; // Delete the specified directory. virtual Status DeleteDir(const std::string& dirname) = 0; // Store the size of fname in *file_size. virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0; // Rename file src to target. virtual Status RenameFile(const std::string& src, const std::string& target) = 0; // Lock the specified file. Used to prevent concurrent access to // the same db by multiple processes. On failure, stores NULL in // *lock and returns non-OK. // // On success, stores a pointer to the object that represents the // acquired lock in *lock and returns OK. The caller should call // UnlockFile(*lock) to release the lock. If the process exits, // the lock will be automatically released. // // If somebody else already holds the lock, finishes immediately // with a failure. I.e., this call does not wait for existing locks // to go away. 
// // May create the named file if it does not already exist. virtual Status LockFile(const std::string& fname, FileLock** lock) = 0; // Release the lock acquired by a previous successful call to LockFile. // REQUIRES: lock was returned by a successful LockFile() call // REQUIRES: lock has not already been unlocked. virtual Status UnlockFile(FileLock* lock) = 0; // Arrange to run "(*function)(arg)" once in a background thread. // // "function" may run in an unspecified thread. Multiple functions // added to the same Env may run concurrently in different threads. // I.e., the caller may not assume that background work items are // serialized. virtual void Schedule( void (*function)(void* arg), void* arg) = 0; // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. virtual void StartThread(void (*function)(void* arg), void* arg) = 0; // *path is set to a temporary directory that can be used for testing. It may // or may not have just been created. The directory may or may not differ // between runs of the same process, but subsequent calls will return the // same directory. virtual Status GetTestDirectory(std::string* path) = 0; // Create and return a log file for storing informational messages. virtual Status NewLogger(const std::string& fname, Logger** result) = 0; // Returns the number of micro-seconds since some fixed point in time. Only // useful for computing deltas of time. virtual uint64_t NowMicros() = 0; // Sleep/delay the thread for the prescribed number of micro-seconds. virtual void SleepForMicroseconds(int micros) = 0; private: // No copying allowed Env(const Env&); void operator=(const Env&); }; // A file abstraction for reading sequentially through a file class SequentialFile { public: SequentialFile() { } virtual ~SequentialFile(); // Read up to "n" bytes from the file. "scratch[0..n-1]" may be // written by this routine. 
Sets "*result" to the data that was // read (including if fewer than "n" bytes were successfully read). // If an error was encountered, returns a non-OK status. // // REQUIRES: External synchronization virtual Status Read(size_t n, Slice* result, char* scratch) = 0; // Skip "n" bytes from the file. This is guaranteed to be no // slower that reading the same data, but may be faster. // // If end of file is reached, skipping will stop at the end of the // file, and Skip will return OK. // // REQUIRES: External synchronization virtual Status Skip(uint64_t n) = 0; }; // A file abstraction for randomly reading the contents of a file. class RandomAccessFile { public: RandomAccessFile() { } virtual ~RandomAccessFile(); // Read up to "n" bytes from the file starting at "offset". // "scratch[0..n-1]" may be written by this routine. Sets "*result" // to the data that was read (including if fewer than "n" bytes were // successfully read). If an error was encountered, returns a // non-OK status. // // Safe for concurrent use by multiple threads. virtual Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const = 0; }; // A file abstraction for sequential writing. The implementation // must provide buffering since callers may append small fragments // at a time to the file. class WritableFile { public: WritableFile() { } virtual ~WritableFile(); virtual Status Append(const Slice& data) = 0; virtual Status Close() = 0; virtual Status Flush() = 0; virtual Status Sync() = 0; private: // No copying allowed WritableFile(const WritableFile&); void operator=(const WritableFile&); }; // An interface for writing log messages. class Logger { public: Logger() { } virtual ~Logger(); // Write an entry to the log file with the specified format. virtual void Logv(const char* format, va_list ap) = 0; private: // No copying allowed Logger(const Logger&); void operator=(const Logger&); }; // Identifies a locked file. 
class FileLock { public: FileLock() { } virtual ~FileLock(); private: // No copying allowed FileLock(const FileLock&); void operator=(const FileLock&); }; // Log the specified data to *info_log if info_log is non-NULL. extern void Log(Logger* info_log, const char* format, ...) # if defined(__GNUC__) || defined(__clang__) __attribute__((__format__ (__printf__, 2, 3))) # endif ; // A utility routine: write "data" to the named file. extern Status WriteStringToFile(Env* env, const Slice& data, const std::string& fname); // A utility routine: read contents of named file into *data extern Status ReadFileToString(Env* env, const std::string& fname, std::string* data); // An implementation of Env that forwards all calls to another Env. // May be useful to clients who wish to override just part of the // functionality of another Env. class EnvWrapper : public Env { public: // Initialize an EnvWrapper that delegates all calls to *t explicit EnvWrapper(Env* t) : target_(t) { } virtual ~EnvWrapper(); // Return the target to which this Env forwards all calls Env* target() const { return target_; } // The following text is boilerplate that forwards all methods to target() Status NewSequentialFile(const std::string& f, SequentialFile** r) { return target_->NewSequentialFile(f, r); } Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) { return target_->NewRandomAccessFile(f, r); } Status NewWritableFile(const std::string& f, WritableFile** r) { return target_->NewWritableFile(f, r); } bool FileExists(const std::string& f) { return target_->FileExists(f); } Status GetChildren(const std::string& dir, std::vector<std::string>* r) { return target_->GetChildren(dir, r); } Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); } Status CreateDir(const std::string& d) { return target_->CreateDir(d); } Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); } Status GetFileSize(const std::string& f, uint64_t* s) { return 
target_->GetFileSize(f, s); } Status RenameFile(const std::string& s, const std::string& t) { return target_->RenameFile(s, t); } Status LockFile(const std::string& f, FileLock** l) { return target_->LockFile(f, l); } Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); } void Schedule(void (*f)(void*), void* a) { return target_->Schedule(f, a); } void StartThread(void (*f)(void*), void* a) { return target_->StartThread(f, a); } virtual Status GetTestDirectory(std::string* path) { return target_->GetTestDirectory(path); } virtual Status NewLogger(const std::string& fname, Logger** result) { return target_->NewLogger(fname, result); } uint64_t NowMicros() { return target_->NowMicros(); } void SleepForMicroseconds(int micros) { target_->SleepForMicroseconds(micros); } private: Env* target_; }; } #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
zzxiaogx-leveldb
include/leveldb/env.h
C++
bsd
11,443
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // TableBuilder provides the interface used to build a Table // (an immutable and sorted map from keys to values). // // Multiple threads can invoke const methods on a TableBuilder without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same TableBuilder must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ #include <stdint.h> #include "leveldb/options.h" #include "leveldb/status.h" namespace leveldb { class BlockBuilder; class BlockHandle; class WritableFile; class TableBuilder { public: // Create a builder that will store the contents of the table it is // building in *file. Does not close the file. It is up to the // caller to close the file after calling Finish(). TableBuilder(const Options& options, WritableFile* file); // REQUIRES: Either Finish() or Abandon() has been called. ~TableBuilder(); // Change the options used by this builder. Note: only some of the // option fields can be changed after construction. If a field is // not allowed to change dynamically and its value in the structure // passed to the constructor is different from its value in the // structure passed to this method, this method will return an error // without changing any fields. Status ChangeOptions(const Options& options); // Add key,value to the table being constructed. // REQUIRES: key is after any previously added key according to comparator. // REQUIRES: Finish(), Abandon() have not been called void Add(const Slice& key, const Slice& value); // Advanced operation: flush any buffered key/value pairs to file. // Can be used to ensure that two adjacent entries never live in // the same data block. 
Most clients should not need to use this method. // REQUIRES: Finish(), Abandon() have not been called void Flush(); // Return non-ok iff some error has been detected. Status status() const; // Finish building the table. Stops using the file passed to the // constructor after this function returns. // REQUIRES: Finish(), Abandon() have not been called Status Finish(); // Indicate that the contents of this builder should be abandoned. Stops // using the file passed to the constructor after this function returns. // If the caller is not going to call Finish(), it must call Abandon() // before destroying this builder. // REQUIRES: Finish(), Abandon() have not been called void Abandon(); // Number of calls to Add() so far. uint64_t NumEntries() const; // Size of the file generated so far. If invoked after a successful // Finish() call, returns the size of the final generated file. uint64_t FileSize() const; private: bool ok() const { return status().ok(); } void WriteBlock(BlockBuilder* block, BlockHandle* handle); struct Rep; Rep* rep_; // No copying allowed TableBuilder(const TableBuilder&); void operator=(const TableBuilder&); }; } #endif // STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
zzxiaogx-leveldb
include/leveldb/table_builder.h
C++
bsd
3,294
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // A Cache is an interface that maps keys to values. It has internal // synchronization and may be safely accessed concurrently from // multiple threads. It may automatically evict entries to make room // for new entries. Values have a specified charge against the cache // capacity. For example, a cache where the values are variable // length strings, may use the length of the string as the charge for // the string. // // A builtin cache implementation with a least-recently-used eviction // policy is provided. Clients may use their own implementations if // they want something more sophisticated (like scan-resistance, a // custom eviction policy, variable cache sizing, etc.) #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_ #define STORAGE_LEVELDB_INCLUDE_CACHE_H_ #include <stdint.h> #include "leveldb/slice.h" namespace leveldb { class Cache; // Create a new cache with a fixed size capacity. This implementation // of Cache uses a least-recently-used eviction policy. extern Cache* NewLRUCache(size_t capacity); class Cache { public: Cache() { } // Destroys all existing entries by calling the "deleter" // function that was passed to the constructor. virtual ~Cache(); // Opaque handle to an entry stored in the cache. struct Handle { }; // Insert a mapping from key->value into the cache and assign it // the specified charge against the total cache capacity. // // Returns a handle that corresponds to the mapping. The caller // must call this->Release(handle) when the returned mapping is no // longer needed. // // When the inserted entry is no longer needed, the key and // value will be passed to "deleter". 
virtual Handle* Insert(const Slice& key, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) = 0; // If the cache has no mapping for "key", returns NULL. // // Else return a handle that corresponds to the mapping. The caller // must call this->Release(handle) when the returned mapping is no // longer needed. virtual Handle* Lookup(const Slice& key) = 0; // Release a mapping returned by a previous Lookup(). // REQUIRES: handle must not have been released yet. // REQUIRES: handle must have been returned by a method on *this. virtual void Release(Handle* handle) = 0; // Return the value encapsulated in a handle returned by a // successful Lookup(). // REQUIRES: handle must not have been released yet. // REQUIRES: handle must have been returned by a method on *this. virtual void* Value(Handle* handle) = 0; // If the cache contains entry for key, erase it. Note that the // underlying entry will be kept around until all existing handles // to it have been released. virtual void Erase(const Slice& key) = 0; // Return a new numeric id. May be used by multiple clients who are // sharing the same cache to partition the key space. Typically the // client will allocate a new id at startup and prepend the id to // its cache keys. virtual uint64_t NewId() = 0; private: void LRU_Remove(Handle* e); void LRU_Append(Handle* e); void Unref(Handle* e); struct Rep; Rep* rep_; // No copying allowed Cache(const Cache&); void operator=(const Cache&); }; } #endif // STORAGE_LEVELDB_UTIL_CACHE_H_
zzxiaogx-leveldb
include/leveldb/cache.h
C++
bsd
3,512
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // A Status encapsulates the result of an operation. It may indicate success, // or it may indicate an error with an associated error message. // // Multiple threads can invoke const methods on a Status without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Status must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ #include <string> #include "leveldb/slice.h" namespace leveldb { class Status { public: // Create a success status. Status() : state_(NULL) { } ~Status() { delete[] state_; } // Copy the specified status. Status(const Status& s); void operator=(const Status& s); // Return a success status. static Status OK() { return Status(); } // Return error status of an appropriate type. static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kNotFound, msg, msg2); } static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kCorruption, msg, msg2); } static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kNotSupported, msg, msg2); } static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kInvalidArgument, msg, msg2); } static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kIOError, msg, msg2); } // Returns true iff the status indicates success. bool ok() const { return (state_ == NULL); } // Returns true iff the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. 
std::string ToString() const; private: // OK status has a NULL state_. Otherwise, state_ is a new[] array // of the following form: // state_[0..3] == length of message // state_[4] == code // state_[5..] == message const char* state_; enum Code { kOk = 0, kNotFound = 1, kCorruption = 2, kNotSupported = 3, kInvalidArgument = 4, kIOError = 5 }; Code code() const { return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice& msg, const Slice& msg2); static const char* CopyState(const char* s); }; inline Status::Status(const Status& s) { state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } inline void Status::operator=(const Status& s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } } } #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
zzxiaogx-leveldb
include/leveldb/status.h
C++
bsd
3,090
#!/bin/sh
#
# Detects OS we're compiling on and generates build_config.mk,
# which in turn gets read while processing Makefile.
#
# build_config.mk will set the following variables:
# - PORT_CFLAGS will either set:
#   -DLEVELDB_PLATFORM_POSIX if cstdatomic is present
#   -DLEVELDB_PLATFORM_NOATOMIC if it is not
# - PLATFORM_CFLAGS with compiler flags for the platform
# - PLATFORM_LDFLAGS with linker flags for the platform

# Delete existing build_config.mk
rm -f build_config.mk

# Detect OS
case `uname -s` in
    Darwin)
        PLATFORM=OS_MACOSX
        echo "PLATFORM_CFLAGS=-DOS_MACOSX" >> build_config.mk
        echo "PLATFORM_LDFLAGS=" >> build_config.mk
        ;;
    Linux)
        PLATFORM=OS_LINUX
        echo "PLATFORM_CFLAGS=-pthread -DOS_LINUX" >> build_config.mk
        echo "PLATFORM_LDFLAGS=-lpthread" >> build_config.mk
        ;;
    SunOS)
        PLATFORM=OS_SOLARIS
        echo "PLATFORM_CFLAGS=-D_REENTRANT -DOS_SOLARIS" >> build_config.mk
        echo "PLATFORM_LDFLAGS=-lpthread -lrt" >> build_config.mk
        ;;
    FreeBSD)
        PLATFORM=OS_FREEBSD
        echo "PLATFORM_CFLAGS=-D_REENTRANT -DOS_FREEBSD" >> build_config.mk
        echo "PLATFORM_LDFLAGS=-lpthread" >> build_config.mk
        ;;
    GNU/kFreeBSD)
        PLATFORM=OS_FREEBSD
        echo "PLATFORM_CFLAGS=-pthread -DOS_FREEBSD" >> build_config.mk
        echo "PLATFORM_LDFLAGS=-lpthread -lrt" >> build_config.mk
        ;;
    *)
        echo "Unknown platform!"
        exit 1
esac

echo "PLATFORM=$PLATFORM" >> build_config.mk

# On GCC, use libc's memcmp, not GCC's memcmp
PORT_CFLAGS="-fno-builtin-memcmp"

# Detect C++0x -- this determines whether we'll use port_noatomic.h
# or port_posix.h by:
# 1. Trying to compile with -std=c++0x and including <cstdatomic>.
# 2. If g++ returns error code, we know to use port_posix.h
g++ $CFLAGS -std=c++0x -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <cstdatomic>
int main() {}
EOF
if [ "$?" = 0 ]; then
    PORT_CFLAGS="$PORT_CFLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_CSTDATOMIC_PRESENT -std=c++0x"
else
    PORT_CFLAGS="$PORT_CFLAGS -DLEVELDB_PLATFORM_POSIX"
fi

# Test whether Snappy library is installed
# http://code.google.com/p/snappy/
g++ $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <snappy.h>
int main() {}
EOF
if [ "$?" = 0 ]; then
    echo "SNAPPY=1" >> build_config.mk
else
    echo "SNAPPY=0" >> build_config.mk
fi

echo "PORT_CFLAGS=$PORT_CFLAGS" >> build_config.mk
zzxiaogx-leveldb
build_detect_platform
Shell
bsd
2,490
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "table/merger.h" #include "leveldb/comparator.h" #include "leveldb/iterator.h" #include "table/iterator_wrapper.h" namespace leveldb { namespace { class MergingIterator : public Iterator { public: MergingIterator(const Comparator* comparator, Iterator** children, int n) : comparator_(comparator), children_(new IteratorWrapper[n]), n_(n), current_(NULL), direction_(kForward) { for (int i = 0; i < n; i++) { children_[i].Set(children[i]); } } virtual ~MergingIterator() { delete[] children_; } virtual bool Valid() const { return (current_ != NULL); } virtual void SeekToFirst() { for (int i = 0; i < n_; i++) { children_[i].SeekToFirst(); } FindSmallest(); direction_ = kForward; } virtual void SeekToLast() { for (int i = 0; i < n_; i++) { children_[i].SeekToLast(); } FindLargest(); direction_ = kReverse; } virtual void Seek(const Slice& target) { for (int i = 0; i < n_; i++) { children_[i].Seek(target); } FindSmallest(); direction_ = kForward; } virtual void Next() { assert(Valid()); // Ensure that all children are positioned after key(). // If we are moving in the forward direction, it is already // true for all of the non-current_ children since current_ is // the smallest child and key() == current_->key(). Otherwise, // we explicitly position the non-current_ children. if (direction_ != kForward) { for (int i = 0; i < n_; i++) { IteratorWrapper* child = &children_[i]; if (child != current_) { child->Seek(key()); if (child->Valid() && comparator_->Compare(key(), child->key()) == 0) { child->Next(); } } } direction_ = kForward; } current_->Next(); FindSmallest(); } virtual void Prev() { assert(Valid()); // Ensure that all children are positioned before key(). 
// If we are moving in the reverse direction, it is already // true for all of the non-current_ children since current_ is // the largest child and key() == current_->key(). Otherwise, // we explicitly position the non-current_ children. if (direction_ != kReverse) { for (int i = 0; i < n_; i++) { IteratorWrapper* child = &children_[i]; if (child != current_) { child->Seek(key()); if (child->Valid()) { // Child is at first entry >= key(). Step back one to be < key() child->Prev(); } else { // Child has no entries >= key(). Position at last entry. child->SeekToLast(); } } } direction_ = kReverse; } current_->Prev(); FindLargest(); } virtual Slice key() const { assert(Valid()); return current_->key(); } virtual Slice value() const { assert(Valid()); return current_->value(); } virtual Status status() const { Status status; for (int i = 0; i < n_; i++) { status = children_[i].status(); if (!status.ok()) { break; } } return status; } private: void FindSmallest(); void FindLargest(); // We might want to use a heap in case there are lots of children. // For now we use a simple array since we expect a very small number // of children in leveldb. const Comparator* comparator_; IteratorWrapper* children_; int n_; IteratorWrapper* current_; // Which direction is the iterator moving? 
enum Direction { kForward, kReverse }; Direction direction_; }; void MergingIterator::FindSmallest() { IteratorWrapper* smallest = NULL; for (int i = 0; i < n_; i++) { IteratorWrapper* child = &children_[i]; if (child->Valid()) { if (smallest == NULL) { smallest = child; } else if (comparator_->Compare(child->key(), smallest->key()) < 0) { smallest = child; } } } current_ = smallest; } void MergingIterator::FindLargest() { IteratorWrapper* largest = NULL; for (int i = n_-1; i >= 0; i--) { IteratorWrapper* child = &children_[i]; if (child->Valid()) { if (largest == NULL) { largest = child; } else if (comparator_->Compare(child->key(), largest->key()) > 0) { largest = child; } } } current_ = largest; } } Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) { assert(n >= 0); if (n == 0) { return NewEmptyIterator(); } else if (n == 1) { return list[0]; } else { return new MergingIterator(cmp, list, n); } } }
zzxiaogx-leveldb
table/merger.cc
C++
bsd
4,875
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/table_builder.h" #include <assert.h> #include <stdio.h> #include "leveldb/comparator.h" #include "leveldb/env.h" #include "table/block_builder.h" #include "table/format.h" #include "util/coding.h" #include "util/crc32c.h" #include "util/logging.h" namespace leveldb { struct TableBuilder::Rep { Options options; Options index_block_options; WritableFile* file; uint64_t offset; Status status; BlockBuilder data_block; BlockBuilder index_block; std::string last_key; int64_t num_entries; bool closed; // Either Finish() or Abandon() has been called. // We do not emit the index entry for a block until we have seen the // first key for the next data block. This allows us to use shorter // keys in the index block. For example, consider a block boundary // between the keys "the quick brown fox" and "the who". We can use // "the r" as the key for the index block entry since it is >= all // entries in the first block and < all entries in subsequent // blocks. // // Invariant: r->pending_index_entry is true only if data_block is empty. 
bool pending_index_entry; BlockHandle pending_handle; // Handle to add to index block std::string compressed_output; Rep(const Options& opt, WritableFile* f) : options(opt), index_block_options(opt), file(f), offset(0), data_block(&options), index_block(&index_block_options), num_entries(0), closed(false), pending_index_entry(false) { index_block_options.block_restart_interval = 1; } }; TableBuilder::TableBuilder(const Options& options, WritableFile* file) : rep_(new Rep(options, file)) { } TableBuilder::~TableBuilder() { assert(rep_->closed); // Catch errors where caller forgot to call Finish() delete rep_; } Status TableBuilder::ChangeOptions(const Options& options) { // Note: if more fields are added to Options, update // this function to catch changes that should not be allowed to // change in the middle of building a Table. if (options.comparator != rep_->options.comparator) { return Status::InvalidArgument("changing comparator while building table"); } // Note that any live BlockBuilders point to rep_->options and therefore // will automatically pick up the updated options. 
rep_->options = options; rep_->index_block_options = options; rep_->index_block_options.block_restart_interval = 1; return Status::OK(); } void TableBuilder::Add(const Slice& key, const Slice& value) { Rep* r = rep_; assert(!r->closed); if (!ok()) return; if (r->num_entries > 0) { assert(r->options.comparator->Compare(key, Slice(r->last_key)) > 0); } if (r->pending_index_entry) { assert(r->data_block.empty()); r->options.comparator->FindShortestSeparator(&r->last_key, key); std::string handle_encoding; r->pending_handle.EncodeTo(&handle_encoding); r->index_block.Add(r->last_key, Slice(handle_encoding)); r->pending_index_entry = false; } r->last_key.assign(key.data(), key.size()); r->num_entries++; r->data_block.Add(key, value); const size_t estimated_block_size = r->data_block.CurrentSizeEstimate(); if (estimated_block_size >= r->options.block_size) { Flush(); } } void TableBuilder::Flush() { Rep* r = rep_; assert(!r->closed); if (!ok()) return; if (r->data_block.empty()) return; assert(!r->pending_index_entry); WriteBlock(&r->data_block, &r->pending_handle); if (ok()) { r->pending_index_entry = true; r->status = r->file->Flush(); } } void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) { // File format contains a sequence of blocks where each block has: // block_data: uint8[n] // type: uint8 // crc: uint32 assert(ok()); Rep* r = rep_; Slice raw = block->Finish(); Slice block_contents; CompressionType type = r->options.compression; // TODO(postrelease): Support more compression options: zlib? 
switch (type) { case kNoCompression: block_contents = raw; break; case kSnappyCompression: { std::string* compressed = &r->compressed_output; if (port::Snappy_Compress(raw.data(), raw.size(), compressed) && compressed->size() < raw.size() - (raw.size() / 8u)) { block_contents = *compressed; } else { // Snappy not supported, or compressed less than 12.5%, so just // store uncompressed form block_contents = raw; type = kNoCompression; } break; } } handle->set_offset(r->offset); handle->set_size(block_contents.size()); r->status = r->file->Append(block_contents); if (r->status.ok()) { char trailer[kBlockTrailerSize]; trailer[0] = type; uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size()); crc = crc32c::Extend(crc, trailer, 1); // Extend crc to cover block type EncodeFixed32(trailer+1, crc32c::Mask(crc)); r->status = r->file->Append(Slice(trailer, kBlockTrailerSize)); if (r->status.ok()) { r->offset += block_contents.size() + kBlockTrailerSize; } } r->compressed_output.clear(); block->Reset(); } Status TableBuilder::status() const { return rep_->status; } Status TableBuilder::Finish() { Rep* r = rep_; Flush(); assert(!r->closed); r->closed = true; BlockHandle metaindex_block_handle; BlockHandle index_block_handle; if (ok()) { BlockBuilder meta_index_block(&r->options); // TODO(postrelease): Add stats and other meta blocks WriteBlock(&meta_index_block, &metaindex_block_handle); } if (ok()) { if (r->pending_index_entry) { r->options.comparator->FindShortSuccessor(&r->last_key); std::string handle_encoding; r->pending_handle.EncodeTo(&handle_encoding); r->index_block.Add(r->last_key, Slice(handle_encoding)); r->pending_index_entry = false; } WriteBlock(&r->index_block, &index_block_handle); } if (ok()) { Footer footer; footer.set_metaindex_handle(metaindex_block_handle); footer.set_index_handle(index_block_handle); std::string footer_encoding; footer.EncodeTo(&footer_encoding); r->status = r->file->Append(footer_encoding); if (r->status.ok()) { 
r->offset += footer_encoding.size(); } } return r->status; } void TableBuilder::Abandon() { Rep* r = rep_; assert(!r->closed); r->closed = true; } uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; } uint64_t TableBuilder::FileSize() const { return rep_->offset; } }
zzxiaogx-leveldb
table/table_builder.cc
C++
bsd
6,783
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_TABLE_TWO_LEVEL_ITERATOR_H_ #define STORAGE_LEVELDB_TABLE_TWO_LEVEL_ITERATOR_H_ #include "leveldb/iterator.h" namespace leveldb { struct ReadOptions; // Return a new two level iterator. A two-level iterator contains an // index iterator whose values point to a sequence of blocks where // each block is itself a sequence of key,value pairs. The returned // two-level iterator yields the concatenation of all key/value pairs // in the sequence of blocks. Takes ownership of "index_iter" and // will delete it when no longer needed. // // Uses a supplied function to convert an index_iter value into // an iterator over the contents of the corresponding block. extern Iterator* NewTwoLevelIterator( Iterator* index_iter, Iterator* (*block_function)( void* arg, const ReadOptions& options, const Slice& index_value), void* arg, const ReadOptions& options); } #endif // STORAGE_LEVELDB_TABLE_TWO_LEVEL_ITERATOR_H_
zzxiaogx-leveldb
table/two_level_iterator.h
C++
bsd
1,201
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// BlockBuilder generates blocks where keys are prefix-compressed:
//
// When we store a key, we drop the prefix shared with the previous
// string.  This helps reduce the space requirement significantly.
// Furthermore, once every K keys, we do not apply the prefix
// compression and store the entire key.  We call this a "restart
// point".  The tail end of the block stores the offsets of all of the
// restart points, and can be used to do a binary search when looking
// for a particular key.  Values are stored as-is (without compression)
// immediately following the corresponding key.
//
// An entry for a particular key-value pair has the form:
//     shared_bytes: varint32
//     unshared_bytes: varint32
//     value_length: varint32
//     key_delta: char[unshared_bytes]
//     value: char[value_length]
// shared_bytes == 0 for restart points.
//
// The trailer of the block has the form:
//     restarts: uint32[num_restarts]
//     num_restarts: uint32
// restarts[i] contains the offset within the block of the ith restart point.

#include "table/block_builder.h"

#include <algorithm>
#include <assert.h>
#include "leveldb/comparator.h"
#include "leveldb/table_builder.h"
#include "util/coding.h"

namespace leveldb {

BlockBuilder::BlockBuilder(const Options* options)
    : options_(options),
      restarts_(),
      counter_(0),
      finished_(false) {
  assert(options->block_restart_interval >= 1);
  restarts_.push_back(0);       // First restart point is at offset 0
}

void BlockBuilder::Reset() {
  buffer_.clear();
  restarts_.clear();
  restarts_.push_back(0);       // First restart point is at offset 0
  counter_ = 0;
  finished_ = false;
  last_key_.clear();
}

size_t BlockBuilder::CurrentSizeEstimate() const {
  return (buffer_.size() +                        // Raw data buffer
          restarts_.size() * sizeof(uint32_t) +   // Restart array
          sizeof(uint32_t));                      // Restart array length
}

Slice BlockBuilder::Finish() {
  // Append restart array
  for (size_t i = 0; i < restarts_.size(); i++) {
    PutFixed32(&buffer_, restarts_[i]);
  }
  PutFixed32(&buffer_, restarts_.size());
  finished_ = true;
  return Slice(buffer_);
}

void BlockBuilder::Add(const Slice& key, const Slice& value) {
  Slice last_key_piece(last_key_);
  assert(!finished_);
  assert(counter_ <= options_->block_restart_interval);
  assert(buffer_.empty() // No values yet?
         || options_->comparator->Compare(key, last_key_piece) > 0);
  size_t shared = 0;
  if (counter_ < options_->block_restart_interval) {
    // See how much sharing to do with previous string
    const size_t min_length = std::min(last_key_piece.size(), key.size());
    while ((shared < min_length) && (last_key_piece[shared] == key[shared])) {
      shared++;
    }
  } else {
    // Restart compression
    restarts_.push_back(buffer_.size());
    counter_ = 0;
  }
  const size_t non_shared = key.size() - shared;

  // Add "<shared><non_shared><value_size>" to buffer_
  PutVarint32(&buffer_, shared);
  PutVarint32(&buffer_, non_shared);
  PutVarint32(&buffer_, value.size());

  // Add string delta to buffer_ followed by value
  buffer_.append(key.data() + shared, non_shared);
  buffer_.append(value.data(), value.size());

  // Update state
  last_key_.resize(shared);
  last_key_.append(key.data() + shared, non_shared);
  assert(Slice(last_key_) == key);
  counter_++;
}

}  // namespace leveldb
zzxiaogx-leveldb
table/block_builder.cc
C++
bsd
3,634
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/iterator.h" namespace leveldb { Iterator::Iterator() { cleanup_.function = NULL; cleanup_.next = NULL; } Iterator::~Iterator() { if (cleanup_.function != NULL) { (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2); for (Cleanup* c = cleanup_.next; c != NULL; ) { (*c->function)(c->arg1, c->arg2); Cleanup* next = c->next; delete c; c = next; } } } void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) { assert(func != NULL); Cleanup* c; if (cleanup_.function == NULL) { c = &cleanup_; } else { c = new Cleanup; c->next = cleanup_.next; cleanup_.next = c; } c->function = func; c->arg1 = arg1; c->arg2 = arg2; } namespace { class EmptyIterator : public Iterator { public: EmptyIterator(const Status& s) : status_(s) { } virtual bool Valid() const { return false; } virtual void Seek(const Slice& target) { } virtual void SeekToFirst() { } virtual void SeekToLast() { } virtual void Next() { assert(false); } virtual void Prev() { assert(false); } Slice key() const { assert(false); return Slice(); } Slice value() const { assert(false); return Slice(); } virtual Status status() const { return status_; } private: Status status_; }; } Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); } Iterator* NewErrorIterator(const Status& status) { return new EmptyIterator(status); } }
zzxiaogx-leveldb
table/iterator.cc
C++
bsd
1,666
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_TABLE_BLOCK_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_H_ #include <stddef.h> #include <stdint.h> #include "leveldb/iterator.h" namespace leveldb { class Comparator; class Block { public: // Initialize the block with the specified contents. // Takes ownership of data[] and will delete[] it when done. Block(const char* data, size_t size); ~Block(); size_t size() const { return size_; } Iterator* NewIterator(const Comparator* comparator); private: uint32_t NumRestarts() const; const char* data_; size_t size_; uint32_t restart_offset_; // Offset in data_ of restart array // No copying allowed Block(const Block&); void operator=(const Block&); class Iter; }; } #endif // STORAGE_LEVELDB_TABLE_BLOCK_H_
zzxiaogx-leveldb
table/block.h
C++
bsd
990
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_ #define STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_ namespace leveldb { // A internal wrapper class with an interface similar to Iterator that // caches the valid() and key() results for an underlying iterator. // This can help avoid virtual function calls and also gives better // cache locality. class IteratorWrapper { public: IteratorWrapper(): iter_(NULL), valid_(false) { } explicit IteratorWrapper(Iterator* iter): iter_(NULL) { Set(iter); } ~IteratorWrapper() { delete iter_; } Iterator* iter() const { return iter_; } // Takes ownership of "iter" and will delete it when destroyed, or // when Set() is invoked again. void Set(Iterator* iter) { delete iter_; iter_ = iter; if (iter_ == NULL) { valid_ = false; } else { Update(); } } // Iterator interface methods bool Valid() const { return valid_; } Slice key() const { assert(Valid()); return key_; } Slice value() const { assert(Valid()); return iter_->value(); } // Methods below require iter() != NULL Status status() const { assert(iter_); return iter_->status(); } void Next() { assert(iter_); iter_->Next(); Update(); } void Prev() { assert(iter_); iter_->Prev(); Update(); } void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); } void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); } void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); } private: void Update() { valid_ = iter_->Valid(); if (valid_) { key_ = iter_->key(); } } Iterator* iter_; bool valid_; Slice key_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_TABLE_ITERATOR_WRAPPER_H_
zzxiaogx-leveldb
table/iterator_wrapper.h
C++
bsd
2,031
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "table/format.h" #include "leveldb/env.h" #include "port/port.h" #include "table/block.h" #include "util/coding.h" #include "util/crc32c.h" namespace leveldb { void BlockHandle::EncodeTo(std::string* dst) const { // Sanity check that all fields have been set assert(offset_ != ~static_cast<uint64_t>(0)); assert(size_ != ~static_cast<uint64_t>(0)); PutVarint64(dst, offset_); PutVarint64(dst, size_); } Status BlockHandle::DecodeFrom(Slice* input) { if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) { return Status::OK(); } else { return Status::Corruption("bad block handle"); } } void Footer::EncodeTo(std::string* dst) const { #ifndef NDEBUG const size_t original_size = dst->size(); #endif metaindex_handle_.EncodeTo(dst); index_handle_.EncodeTo(dst); dst->resize(2 * BlockHandle::kMaxEncodedLength); // Padding PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu)); PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32)); assert(dst->size() == original_size + kEncodedLength); } Status Footer::DecodeFrom(Slice* input) { const char* magic_ptr = input->data() + kEncodedLength - 8; const uint32_t magic_lo = DecodeFixed32(magic_ptr); const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4); const uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) | (static_cast<uint64_t>(magic_lo))); if (magic != kTableMagicNumber) { return Status::InvalidArgument("not an sstable (bad magic number)"); } Status result = metaindex_handle_.DecodeFrom(input); if (result.ok()) { result = index_handle_.DecodeFrom(input); } if (result.ok()) { // We skip over any leftover data (just padding for now) in "input" const char* end = magic_ptr + 8; *input = Slice(end, input->data() + input->size() - end); } return result; } Status 
ReadBlock(RandomAccessFile* file, const ReadOptions& options, const BlockHandle& handle, Block** block) { *block = NULL; // Read the block contents as well as the type/crc footer. // See table_builder.cc for the code that built this structure. size_t n = static_cast<size_t>(handle.size()); char* buf = new char[n + kBlockTrailerSize]; Slice contents; Status s = file->Read(handle.offset(), n + kBlockTrailerSize, &contents, buf); if (!s.ok()) { delete[] buf; return s; } if (contents.size() != n + kBlockTrailerSize) { delete[] buf; return Status::Corruption("truncated block read"); } // Check the crc of the type and the block contents const char* data = contents.data(); // Pointer to where Read put the data if (options.verify_checksums) { const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1)); const uint32_t actual = crc32c::Value(data, n + 1); if (actual != crc) { delete[] buf; s = Status::Corruption("block checksum mismatch"); return s; } } switch (data[n]) { case kNoCompression: if (data != buf) { // File implementation gave us pointer to some other data. // Copy into buf[]. memcpy(buf, data, n + kBlockTrailerSize); } // Ok break; case kSnappyCompression: { size_t ulength = 0; if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) { delete[] buf; return Status::Corruption("corrupted compressed block contents"); } char* ubuf = new char[ulength]; if (!port::Snappy_Uncompress(data, n, ubuf)) { delete[] buf; delete[] ubuf; return Status::Corruption("corrupted compressed block contents"); } delete[] buf; buf = ubuf; n = ulength; break; } default: delete[] buf; return Status::Corruption("bad block type"); } *block = new Block(buf, n); // Block takes ownership of buf[] return Status::OK(); } }
zzxiaogx-leveldb
table/format.cc
C++
bsd
4,167
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/two_level_iterator.h"

#include "leveldb/table.h"
#include "table/block.h"
#include "table/format.h"
#include "table/iterator_wrapper.h"

namespace leveldb {

namespace {

// Factory that turns an index-iterator value (an opaque Slice) into an
// iterator over the corresponding data block.
typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);

// Iterates over the concatenation of the data blocks named by an index
// iterator: index_iter_ yields block handles, data_iter_ walks the entries
// of the currently-open block.
class TwoLevelIterator: public Iterator {
 public:
  TwoLevelIterator(
    Iterator* index_iter,
    BlockFunction block_function,
    void* arg,
    const ReadOptions& options);

  virtual ~TwoLevelIterator();

  virtual void Seek(const Slice& target);
  virtual void SeekToFirst();
  virtual void SeekToLast();
  virtual void Next();
  virtual void Prev();

  // Validity tracks the data-level iterator only; the skip helpers below
  // guarantee data_iter_ is either NULL or positioned on a real entry.
  virtual bool Valid() const {
    return data_iter_.Valid();
  }
  virtual Slice key() const {
    assert(Valid());
    return data_iter_.key();
  }
  virtual Slice value() const {
    assert(Valid());
    return data_iter_.value();
  }
  virtual Status status() const {
    // It'd be nice if status() returned a const Status& instead of a Status
    if (!index_iter_.status().ok()) {
      return index_iter_.status();
    } else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) {
      return data_iter_.status();
    } else {
      return status_;
    }
  }

 private:
  // Remember the first error seen; later errors do not overwrite it.
  void SaveError(const Status& s) {
    if (status_.ok() && !s.ok()) status_ = s;
  }
  void SkipEmptyDataBlocksForward();
  void SkipEmptyDataBlocksBackward();
  void SetDataIterator(Iterator* data_iter);
  void InitDataBlock();

  BlockFunction block_function_;
  void* arg_;                    // Opaque first argument for block_function_
  const ReadOptions options_;
  Status status_;
  IteratorWrapper index_iter_;
  IteratorWrapper data_iter_; // May be NULL
  // If data_iter_ is non-NULL, then "data_block_handle_" holds the
  // "index_value" passed to block_function_ to create the data_iter_.
  std::string data_block_handle_;
};

TwoLevelIterator::TwoLevelIterator(
    Iterator* index_iter,
    BlockFunction block_function,
    void* arg,
    const ReadOptions& options)
    : block_function_(block_function),
      arg_(arg),
      options_(options),
      index_iter_(index_iter),
      data_iter_(NULL) {
}

TwoLevelIterator::~TwoLevelIterator() {
}

void TwoLevelIterator::Seek(const Slice& target) {
  index_iter_.Seek(target);
  InitDataBlock();
  if (data_iter_.iter() != NULL) data_iter_.Seek(target);
  SkipEmptyDataBlocksForward();
}

void TwoLevelIterator::SeekToFirst() {
  index_iter_.SeekToFirst();
  InitDataBlock();
  if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
  SkipEmptyDataBlocksForward();
}

void TwoLevelIterator::SeekToLast() {
  index_iter_.SeekToLast();
  InitDataBlock();
  if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
  SkipEmptyDataBlocksBackward();
}

void TwoLevelIterator::Next() {
  assert(Valid());
  data_iter_.Next();
  SkipEmptyDataBlocksForward();
}

void TwoLevelIterator::Prev() {
  assert(Valid());
  data_iter_.Prev();
  SkipEmptyDataBlocksBackward();
}

// Advance over exhausted/NULL data blocks until positioned on a valid entry
// or the index is exhausted (in which case data_iter_ becomes NULL).
void TwoLevelIterator::SkipEmptyDataBlocksForward() {
  while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
    // Move to next block
    if (!index_iter_.Valid()) {
      SetDataIterator(NULL);
      return;
    }
    index_iter_.Next();
    InitDataBlock();
    if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
  }
}

// Mirror of SkipEmptyDataBlocksForward() for reverse iteration.
void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
  while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
    // Move to next block
    if (!index_iter_.Valid()) {
      SetDataIterator(NULL);
      return;
    }
    index_iter_.Prev();
    InitDataBlock();
    if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
  }
}

// Replace the current data iterator, first capturing any error it reported.
// IteratorWrapper::Set() deletes the previously held iterator.
void TwoLevelIterator::SetDataIterator(Iterator* data_iter) {
  if (data_iter_.iter() != NULL) SaveError(data_iter_.status());
  data_iter_.Set(data_iter);
}

// Open the data block that index_iter_ currently points at, reusing the
// existing data_iter_ when the block handle has not changed.
void TwoLevelIterator::InitDataBlock() {
  if (!index_iter_.Valid()) {
    SetDataIterator(NULL);
  } else {
    Slice handle = index_iter_.value();
    if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) {
      // data_iter_ is already constructed with this iterator, so
      // no need to change anything
    } else {
      Iterator* iter = (*block_function_)(arg_, options_, handle);
      data_block_handle_.assign(handle.data(), handle.size());
      SetDataIterator(iter);
    }
  }
}

}  // anonymous namespace

Iterator* NewTwoLevelIterator(
    Iterator* index_iter,
    BlockFunction block_function,
    void* arg,
    const ReadOptions& options) {
  return new TwoLevelIterator(index_iter, block_function, arg, options);
}

}  // namespace leveldb
zzxiaogx-leveldb
table/two_level_iterator.cc
C++
bsd
4,666
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/table.h"

#include <map>
#include <string>
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "leveldb/table_builder.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "util/random.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace leveldb {

// Return reverse of "key".
// Used to test non-lexicographic comparators.
static std::string Reverse(const Slice& key) {
  std::string str(key.ToString());
  std::string rev("");
  for (std::string::reverse_iterator rit = str.rbegin();
       rit != str.rend();
       ++rit) {
    rev.push_back(*rit);
  }
  return rev;
}

namespace {
// A comparator that orders keys by the bytewise order of their reversal.
// Delegates to BytewiseComparator() on reversed copies of the inputs.
class ReverseKeyComparator : public Comparator {
 public:
  virtual const char* Name() const {
    return "leveldb.ReverseBytewiseComparator";
  }

  virtual int Compare(const Slice& a, const Slice& b) const {
    return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
  }

  virtual void FindShortestSeparator(
      std::string* start,
      const Slice& limit) const {
    std::string s = Reverse(*start);
    std::string l = Reverse(limit);
    BytewiseComparator()->FindShortestSeparator(&s, l);
    *start = Reverse(s);
  }

  virtual void FindShortSuccessor(std::string* key) const {
    std::string s = Reverse(*key);
    BytewiseComparator()->FindShortSuccessor(&s);
    *key = Reverse(s);
  }
};
}
static ReverseKeyComparator reverse_key_comparator;

// Mutate *key into the smallest key strictly greater than it under "cmp".
// For the reverse comparator, appending '\0' must happen in reversed space.
static void Increment(const Comparator* cmp, std::string* key) {
  if (cmp == BytewiseComparator()) {
    key->push_back('\0');
  } else {
    assert(cmp == &reverse_key_comparator);
    std::string rev = Reverse(*key);
    rev.push_back('\0');
    *key = Reverse(rev);
  }
}

// An STL comparator that uses a Comparator
namespace {
struct STLLessThan {
  const Comparator* cmp;

  STLLessThan() : cmp(BytewiseComparator()) { }
  STLLessThan(const Comparator* c) : cmp(c) { }
  bool operator()(const std::string& a, const std::string& b) const {
    return cmp->Compare(Slice(a), Slice(b)) < 0;
  }
};
}

// WritableFile that accumulates everything appended to it in a string.
class StringSink: public WritableFile {
 public:
  ~StringSink() { }

  const std::string& contents() const { return contents_; }

  virtual Status Close() { return Status::OK(); }
  virtual Status Flush() { return Status::OK(); }
  virtual Status Sync() { return Status::OK(); }

  virtual Status Append(const Slice& data) {
    contents_.append(data.data(), data.size());
    return Status::OK();
  }

 private:
  std::string contents_;
};

// RandomAccessFile backed by an in-memory copy of a string.
class StringSource: public RandomAccessFile {
 public:
  StringSource(const Slice& contents)
      : contents_(contents.data(), contents.size()) {
  }

  virtual ~StringSource() { }

  uint64_t Size() const { return contents_.size(); }

  virtual Status Read(uint64_t offset, size_t n, Slice* result,
                      char* scratch) const {
    if (offset > contents_.size()) {
      return Status::InvalidArgument("invalid Read offset");
    }
    // Clamp reads that run past the end, like a real file would.
    if (offset + n > contents_.size()) {
      n = contents_.size() - offset;
    }
    memcpy(scratch, &contents_[offset], n);
    *result = Slice(scratch, n);
    return Status::OK();
  }

 private:
  std::string contents_;
};

// In-memory model of the expected contents, ordered by the test comparator.
typedef std::map<std::string, std::string, STLLessThan> KVMap;

// Helper class for tests to unify the interface between
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
 public:
  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
  virtual ~Constructor() { }

  void Add(const std::string& key, const Slice& value) {
    data_[key] = value.ToString();
  }

  // Finish constructing the data structure with all the keys that have
  // been added so far.  Returns the keys in sorted order in "*keys"
  // and stores the key/value pairs in "*kvmap"
  void Finish(const Options& options,
              std::vector<std::string>* keys,
              KVMap* kvmap) {
    *kvmap = data_;
    keys->clear();
    for (KVMap::const_iterator it = data_.begin();
         it != data_.end();
         ++it) {
      keys->push_back(it->first);
    }
    data_.clear();
    Status s = FinishImpl(options, *kvmap);
    ASSERT_TRUE(s.ok()) << s.ToString();
  }

  // Construct the data structure from the data in "data"
  virtual Status FinishImpl(const Options& options, const KVMap& data) = 0;

  virtual size_t NumBytes() const = 0;

  virtual Iterator* NewIterator() const = 0;

  virtual const KVMap& data() { return data_; }

  virtual DB* db() const { return NULL; }  // Overridden in DBConstructor

 private:
  KVMap data_;
};

// Constructor that materializes the data as a single Block.
class BlockConstructor: public Constructor {
 public:
  explicit BlockConstructor(const Comparator* cmp)
      : Constructor(cmp),
        comparator_(cmp),
        block_size_(-1),
        block_(NULL) { }
  ~BlockConstructor() {
    delete block_;
  }
  virtual Status FinishImpl(const Options& options, const KVMap& data) {
    delete block_;
    block_ = NULL;
    BlockBuilder builder(&options);

    for (KVMap::const_iterator it = data.begin();
         it != data.end();
         ++it) {
      builder.Add(it->first, it->second);
    }
    // Open the block.  Block takes ownership of the heap copy we make here.
    Slice block_data = builder.Finish();
    block_size_ = block_data.size();
    char* block_data_copy = new char[block_size_];
    memcpy(block_data_copy, block_data.data(), block_size_);
    block_ = new Block(block_data_copy, block_size_);
    return Status::OK();
  }
  virtual size_t NumBytes() const { return block_size_; }

  virtual Iterator* NewIterator() const {
    return block_->NewIterator(comparator_);
  }

 private:
  const Comparator* comparator_;
  int block_size_;
  Block* block_;

  BlockConstructor();
};

// Constructor that builds a full sstable in memory and reopens it as a Table.
class TableConstructor: public Constructor {
 public:
  TableConstructor(const Comparator* cmp)
      : Constructor(cmp),
        source_(NULL), table_(NULL) {
  }
  ~TableConstructor() {
    Reset();
  }
  virtual Status FinishImpl(const Options& options, const KVMap& data) {
    Reset();
    StringSink sink;
    TableBuilder builder(options, &sink);

    for (KVMap::const_iterator it = data.begin();
         it != data.end();
         ++it) {
      builder.Add(it->first, it->second);
      ASSERT_TRUE(builder.status().ok());
    }
    Status s = builder.Finish();
    ASSERT_TRUE(s.ok()) << s.ToString();

    ASSERT_EQ(sink.contents().size(), builder.FileSize());

    // Open the table
    source_ = new StringSource(sink.contents());
    Options table_options;
    table_options.comparator = options.comparator;
    return Table::Open(table_options, source_, sink.contents().size(),
                       &table_);
  }
  virtual size_t NumBytes() const { return source_->Size(); }

  virtual Iterator* NewIterator() const {
    return table_->NewIterator(ReadOptions());
  }

  uint64_t ApproximateOffsetOf(const Slice& key) const {
    return table_->ApproximateOffsetOf(key);
  }

 private:
  void Reset() {
    delete table_;
    delete source_;
    table_ = NULL;
    source_ = NULL;
  }

  StringSource* source_;
  Table* table_;

  TableConstructor();
};

// A helper class that converts internal format keys into user keys
class KeyConvertingIterator: public Iterator {
 public:
  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
  virtual ~KeyConvertingIterator() { delete iter_; }
  virtual bool Valid() const { return iter_->Valid(); }
  virtual void Seek(const Slice& target) {
    // Wrap the user key in an internal key that sorts before any entry
    // for the same user key.
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->Seek(encoded);
  }
  virtual void SeekToFirst() { iter_->SeekToFirst(); }
  virtual void SeekToLast() { iter_->SeekToLast(); }
  virtual void Next() { iter_->Next(); }
  virtual void Prev() { iter_->Prev(); }

  virtual Slice key() const {
    assert(Valid());
    ParsedInternalKey key;
    if (!ParseInternalKey(iter_->key(), &key)) {
      status_ = Status::Corruption("malformed internal key");
      return Slice("corrupted key");
    }
    return key.user_key;
  }

  virtual Slice value() const { return iter_->value(); }
  virtual Status status() const {
    return status_.ok() ? iter_->status() : status_;
  }

 private:
  mutable Status status_;   // mutable: key() reports parse errors from const
  Iterator* iter_;

  // No copying allowed
  KeyConvertingIterator(const KeyConvertingIterator&);
  void operator=(const KeyConvertingIterator&);
};

// Constructor that stores the data in a MemTable.
class MemTableConstructor: public Constructor {
 public:
  explicit MemTableConstructor(const Comparator* cmp)
      : Constructor(cmp),
        internal_comparator_(cmp) {
    memtable_ = new MemTable(internal_comparator_);
    memtable_->Ref();
  }
  ~MemTableConstructor() {
    memtable_->Unref();
  }
  virtual Status FinishImpl(const Options& options, const KVMap& data) {
    memtable_->Unref();
    memtable_ = new MemTable(internal_comparator_);
    memtable_->Ref();
    int seq = 1;
    for (KVMap::const_iterator it = data.begin();
         it != data.end();
         ++it) {
      memtable_->Add(seq, kTypeValue, it->first, it->second);
      seq++;
    }
    return Status::OK();
  }
  virtual size_t NumBytes() const {
    return memtable_->ApproximateMemoryUsage();
  }

  virtual Iterator* NewIterator() const {
    return new KeyConvertingIterator(memtable_->NewIterator());
  }

 private:
  InternalKeyComparator internal_comparator_;
  MemTable* memtable_;
};

// Constructor that writes the data through a real DB instance on disk.
class DBConstructor: public Constructor {
 public:
  explicit DBConstructor(const Comparator* cmp)
      : Constructor(cmp),
        comparator_(cmp) {
    db_ = NULL;
    NewDB();
  }
  ~DBConstructor() {
    delete db_;
  }
  virtual Status FinishImpl(const Options& options, const KVMap& data) {
    delete db_;
    db_ = NULL;
    NewDB();
    for (KVMap::const_iterator it = data.begin();
         it != data.end();
         ++it) {
      WriteBatch batch;
      batch.Put(it->first, it->second);
      ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
    }
    return Status::OK();
  }
  virtual size_t NumBytes() const {
    Range r("", "\xff\xff");
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  virtual Iterator* NewIterator() const {
    return db_->NewIterator(ReadOptions());
  }

  virtual DB* db() const { return db_; }

 private:
  void NewDB() {
    std::string name = test::TmpDir() + "/table_testdb";

    Options options;
    options.comparator = comparator_;
    Status status = DestroyDB(name, options);
    ASSERT_TRUE(status.ok()) << status.ToString();

    options.create_if_missing = true;
    options.error_if_exists = true;
    options.write_buffer_size = 10000;  // Something small to force merging
    status = DB::Open(options, name, &db_);
    ASSERT_TRUE(status.ok()) << status.ToString();
  }

  const Comparator* comparator_;
  DB* db_;
};

enum TestType {
  TABLE_TEST,
  BLOCK_TEST,
  MEMTABLE_TEST,
  DB_TEST
};

struct TestArgs {
  TestType type;
  bool reverse_compare;
  int restart_interval;
};

// Cross product of storage type x comparator direction x restart interval.
static const TestArgs kTestArgList[] = {
  { TABLE_TEST, false, 16 },
  { TABLE_TEST, false, 1 },
  { TABLE_TEST, false, 1024 },
  { TABLE_TEST, true, 16 },
  { TABLE_TEST, true, 1 },
  { TABLE_TEST, true, 1024 },

  { BLOCK_TEST, false, 16 },
  { BLOCK_TEST, false, 1 },
  { BLOCK_TEST, false, 1024 },
  { BLOCK_TEST, true, 16 },
  { BLOCK_TEST, true, 1 },
  { BLOCK_TEST, true, 1024 },

  // Restart interval does not matter for memtables
  { MEMTABLE_TEST, false, 16 },
  { MEMTABLE_TEST, true, 16 },

  // Do not bother with restart interval variations for DB
  { DB_TEST, false, 16 },
  { DB_TEST, true, 16 },
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);

// Drives a Constructor and checks its iterator against the KVMap model:
// forward scan, backward scan, and a randomized walk must all agree.
class Harness {
 public:
  Harness() : constructor_(NULL) { }

  void Init(const TestArgs& args) {
    delete constructor_;
    constructor_ = NULL;
    options_ = Options();

    options_.block_restart_interval = args.restart_interval;
    // Use shorter block size for tests to exercise block boundary
    // conditions more.
    options_.block_size = 256;
    if (args.reverse_compare) {
      options_.comparator = &reverse_key_comparator;
    }
    switch (args.type) {
      case TABLE_TEST:
        constructor_ = new TableConstructor(options_.comparator);
        break;
      case BLOCK_TEST:
        constructor_ = new BlockConstructor(options_.comparator);
        break;
      case MEMTABLE_TEST:
        constructor_ = new MemTableConstructor(options_.comparator);
        break;
      case DB_TEST:
        constructor_ = new DBConstructor(options_.comparator);
        break;
    }
  }

  ~Harness() {
    delete constructor_;
  }

  void Add(const std::string& key, const std::string& value) {
    constructor_->Add(key, value);
  }

  void Test(Random* rnd) {
    std::vector<std::string> keys;
    KVMap data;
    constructor_->Finish(options_, &keys, &data);

    TestForwardScan(keys, data);
    TestBackwardScan(keys, data);
    TestRandomAccess(rnd, keys, data);
  }

  void TestForwardScan(const std::vector<std::string>& keys,
                       const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToFirst();
    for (KVMap::const_iterator model_iter = data.begin();
         model_iter != data.end();
         ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }

  void TestBackwardScan(const std::vector<std::string>& keys,
                        const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToLast();
    for (KVMap::const_reverse_iterator model_iter = data.rbegin();
         model_iter != data.rend();
         ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Prev();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }

  // 200 random operations (Next/SeekToFirst/Seek/Prev/SeekToLast), applying
  // each to both the real iterator and the model map iterator in lockstep.
  void TestRandomAccess(Random* rnd,
                        const std::vector<std::string>& keys,
                        const KVMap& data) {
    static const bool kVerbose = false;
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) fprintf(stderr, "---\n");
    for (int i = 0; i < 200; i++) {
      const int toss = rnd->Uniform(5);
      switch (toss) {
        case 0: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 1: {
          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 2: {
          std::string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
                                EscapeString(key).c_str());
          iter->Seek(Slice(key));
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 3: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Prev\n");
            iter->Prev();
            if (model_iter == data.begin()) {
              model_iter = data.end();   // Wrap around to invalid value
            } else {
              --model_iter;
            }
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 4: {
          if (kVerbose) fprintf(stderr, "SeekToLast\n");
          iter->SeekToLast();
          if (keys.empty()) {
            model_iter = data.end();
          } else {
            std::string last = data.rbegin()->first;
            model_iter = data.lower_bound(last);
          }
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
      }
    }
    delete iter;
  }

  std::string ToString(const KVMap& data, const KVMap::const_iterator& it) {
    if (it == data.end()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const KVMap& data,
                       const KVMap::const_reverse_iterator& it) {
    if (it == data.rend()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const Iterator* it) {
    if (!it->Valid()) {
      return "END";
    } else {
      return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
    }
  }

  // Pick an existing key, or a nearby smaller/larger variant of one.
  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
    if (keys.empty()) {
      return "foo";
    } else {
      const int index = rnd->Uniform(keys.size());
      std::string result = keys[index];
      switch (rnd->Uniform(3)) {
        case 0:
          // Return an existing key
          break;
        case 1: {
          // Attempt to return something smaller than an existing key
          if (result.size() > 0 && result[result.size()-1] > '\0') {
            result[result.size()-1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key
          Increment(options_.comparator, &result);
          break;
        }
      }
      return result;
    }
  }

  // Returns NULL if not running against a DB
  DB* db() const { return constructor_->db(); }

 private:
  Options options_;
  Constructor* constructor_;
};

// Test the empty key
TEST(Harness, SimpleEmptyKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 1);
    Add("", "v");
    Test(&rnd);
  }
}

TEST(Harness, SimpleSingle) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 2);
    Add("abc", "v");
    Test(&rnd);
  }
}

TEST(Harness, SimpleMulti) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 3);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}

TEST(Harness, SimpleSpecialKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 4);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}

TEST(Harness, Randomized) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 5);
    for (int num_entries = 0; num_entries < 2000;
         num_entries += (num_entries < 50 ? 1 : 200)) {
      if ((num_entries % 10) == 0) {
        fprintf(stderr, "case %d of %d: num_entries = %d\n",
                (i + 1), int(kNumTestArgs), num_entries);
      }
      for (int e = 0; e < num_entries; e++) {
        std::string v;
        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
            test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
      }
      Test(&rnd);
    }
  }
}

TEST(Harness, RandomizedLongDB) {
  Random rnd(test::RandomSeed());
  TestArgs args = { DB_TEST, false, 16 };
  Init(args);
  int num_entries = 100000;
  for (int e = 0; e < num_entries; e++) {
    std::string v;
    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
        test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
  }
  Test(&rnd);

  // We must have created enough data to force merging
  int files = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    std::string value;
    char name[100];
    snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
    ASSERT_TRUE(db()->GetProperty(name, &value));
    files += atoi(value.c_str());
  }
  ASSERT_GT(files, 0);
}

class MemTableTest { };

// Smoke test: insert a batch into a MemTable and dump it to stderr.
TEST(MemTableTest, Simple) {
  InternalKeyComparator cmp(BytewiseComparator());
  MemTable* memtable = new MemTable(cmp);
  memtable->Ref();
  WriteBatch batch;
  WriteBatchInternal::SetSequence(&batch, 100);
  batch.Put(std::string("k1"), std::string("v1"));
  batch.Put(std::string("k2"), std::string("v2"));
  batch.Put(std::string("k3"), std::string("v3"));
  batch.Put(std::string("largekey"), std::string("vlarge"));
  ASSERT_TRUE(WriteBatchInternal::InsertInto(&batch, memtable).ok());

  Iterator* iter = memtable->NewIterator();
  iter->SeekToFirst();
  while (iter->Valid()) {
    fprintf(stderr, "key: '%s' -> '%s'\n",
            iter->key().ToString().c_str(),
            iter->value().ToString().c_str());
    iter->Next();
  }

  delete iter;
  memtable->Unref();
}

// True iff low <= val <= high; prints the violation before returning false.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  bool result = (val >= low) && (val <= high);
  if (!result) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val),
            (unsigned long long)(low),
            (unsigned long long)(high));
  }
  return result;
}

class TableTest { };

TEST(TableTest, ApproximateOffsetOfPlain) {
  TableConstructor c(BytewiseComparator());
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kNoCompression;
  c.Finish(options, &keys, &kvmap);

  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 611000));
}

// Probe whether this build of port:: actually has snappy available.
static bool SnappyCompressionSupported() {
  std::string out;
  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
  return port::Snappy_Compress(in.data(), in.size(), &out);
}

TEST(TableTest, ApproximateOffsetOfCompressed) {
  if (!SnappyCompressionSupported()) {
    fprintf(stderr, "skipping compression tests\n");
    return;
  }

  Random rnd(301);
  TableConstructor c(BytewiseComparator());
  std::string tmp;
  c.Add("k01", "hello");
  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  c.Add("k03", "hello3");
  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kSnappyCompression;
  c.Finish(options, &keys, &kvmap);

  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6000));
}

}  // namespace leveldb

int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}
zzxiaogx-leveldb
table/table_test.cc
C++
bsd
23,716
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ #include <vector> #include <stdint.h> #include "leveldb/slice.h" namespace leveldb { struct Options; class BlockBuilder { public: explicit BlockBuilder(const Options* options); // Reset the contents as if the BlockBuilder was just constructed. void Reset(); // REQUIRES: Finish() has not been callled since the last call to Reset(). // REQUIRES: key is larger than any previously added key void Add(const Slice& key, const Slice& value); // Finish building the block and return a slice that refers to the // block contents. The returned slice will remain valid for the // lifetime of this builder or until Reset() is called. Slice Finish(); // Returns an estimate of the current (uncompressed) size of the block // we are building. size_t CurrentSizeEstimate() const; // Return true iff no entries have been added since the last Reset() bool empty() const { return buffer_.empty(); } private: const Options* options_; std::string buffer_; // Destination buffer std::vector<uint32_t> restarts_; // Restart points int counter_; // Number of entries emitted since restart bool finished_; // Has Finish() been called? std::string last_key_; // No copying allowed BlockBuilder(const BlockBuilder&); void operator=(const BlockBuilder&); }; } #endif // STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
zzxiaogx-leveldb
table/block_builder.h
C++
bsd
1,745
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_TABLE_MERGER_H_ #define STORAGE_LEVELDB_TABLE_MERGER_H_ namespace leveldb { class Comparator; class Iterator; // Return an iterator that provided the union of the data in // children[0,n-1]. Takes ownership of the child iterators and // will delete them when the result iterator is deleted. // // The result does no duplicate suppression. I.e., if a particular // key is present in K child iterators, it will be yielded K times. // // REQUIRES: n >= 0 extern Iterator* NewMergingIterator( const Comparator* comparator, Iterator** children, int n); } #endif // STORAGE_LEVELDB_TABLE_MERGER_H_
zzxiaogx-leveldb
table/merger.h
C++
bsd
845
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
#define STORAGE_LEVELDB_TABLE_FORMAT_H_

#include <string>
#include <stdint.h>
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/table_builder.h"

namespace leveldb {

class Block;
class RandomAccessFile;
struct ReadOptions;

// BlockHandle is a pointer to the extent of a file that stores a data
// block or a meta block.
class BlockHandle {
 public:
  BlockHandle();

  // The offset of the block in the file.
  uint64_t offset() const { return offset_; }
  void set_offset(uint64_t offset) { offset_ = offset; }

  // The size of the stored block
  uint64_t size() const { return size_; }
  void set_size(uint64_t size) { size_ = size; }

  void EncodeTo(std::string* dst) const;
  Status DecodeFrom(Slice* input);

  // Maximum encoding length of a BlockHandle
  // (two varint64s, each at most 10 bytes)
  enum { kMaxEncodedLength = 10 + 10 };

 private:
  uint64_t offset_;
  uint64_t size_;
};

// Footer encapsulates the fixed information stored at the tail
// end of every table file.
class Footer {
 public:
  Footer() { }

  // The block handle for the metaindex block of the table
  const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
  void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }

  // The block handle for the index block of the table
  const BlockHandle& index_handle() const {
    return index_handle_;
  }
  void set_index_handle(const BlockHandle& h) {
    index_handle_ = h;
  }

  void EncodeTo(std::string* dst) const;
  Status DecodeFrom(Slice* input);

  // Encoded length of a Footer.  Note that the serialization of a
  // Footer will always occupy exactly this many bytes.  It consists
  // of two block handles and a magic number.
  enum {
    kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
  };

 private:
  BlockHandle metaindex_handle_;
  BlockHandle index_handle_;
};

// kTableMagicNumber was picked by running
//    echo http://code.google.com/p/leveldb/ | sha1sum
// and taking the leading 64 bits.
static const uint64_t kTableMagicNumber = 0xdb4775248b80fb57ull;

// 1-byte type + 32-bit crc
static const size_t kBlockTrailerSize = 5;

// Read the block identified by "handle" from "file".  On success,
// store a pointer to the heap-allocated result in *block and return
// OK.  On failure store NULL in *block and return non-OK.
extern Status ReadBlock(RandomAccessFile* file,
                        const ReadOptions& options,
                        const BlockHandle& handle,
                        Block** block);

// Implementation details follow.  Clients should ignore.

inline BlockHandle::BlockHandle()
    : offset_(~static_cast<uint64_t>(0)),
      size_(~static_cast<uint64_t>(0)) {
}

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_TABLE_FORMAT_H_
zzxiaogx-leveldb
table/format.h
C++
bsd
2,979
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/table.h" #include "leveldb/cache.h" #include "leveldb/env.h" #include "table/block.h" #include "table/format.h" #include "table/two_level_iterator.h" #include "util/coding.h" namespace leveldb { struct Table::Rep { ~Rep() { delete index_block; } Options options; Status status; RandomAccessFile* file; uint64_t cache_id; BlockHandle metaindex_handle; // Handle to metaindex_block: saved from footer Block* index_block; }; Status Table::Open(const Options& options, RandomAccessFile* file, uint64_t size, Table** table) { *table = NULL; if (size < Footer::kEncodedLength) { return Status::InvalidArgument("file is too short to be an sstable"); } char footer_space[Footer::kEncodedLength]; Slice footer_input; Status s = file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength, &footer_input, footer_space); if (!s.ok()) return s; Footer footer; s = footer.DecodeFrom(&footer_input); if (!s.ok()) return s; // Read the index block Block* index_block = NULL; if (s.ok()) { s = ReadBlock(file, ReadOptions(), footer.index_handle(), &index_block); } if (s.ok()) { // We've successfully read the footer and the index block: we're // ready to serve requests. Rep* rep = new Table::Rep; rep->options = options; rep->file = file; rep->metaindex_handle = footer.metaindex_handle(); rep->index_block = index_block; rep->cache_id = (options.block_cache ? 
options.block_cache->NewId() : 0); *table = new Table(rep); } else { if (index_block) delete index_block; } return s; } Table::~Table() { delete rep_; } static void DeleteBlock(void* arg, void* ignored) { delete reinterpret_cast<Block*>(arg); } static void DeleteCachedBlock(const Slice& key, void* value) { Block* block = reinterpret_cast<Block*>(value); delete block; } static void ReleaseBlock(void* arg, void* h) { Cache* cache = reinterpret_cast<Cache*>(arg); Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h); cache->Release(handle); } // Convert an index iterator value (i.e., an encoded BlockHandle) // into an iterator over the contents of the corresponding block. Iterator* Table::BlockReader(void* arg, const ReadOptions& options, const Slice& index_value) { Table* table = reinterpret_cast<Table*>(arg); Cache* block_cache = table->rep_->options.block_cache; Block* block = NULL; Cache::Handle* cache_handle = NULL; BlockHandle handle; Slice input = index_value; Status s = handle.DecodeFrom(&input); // We intentionally allow extra stuff in index_value so that we // can add more features in the future. 
if (s.ok()) { if (block_cache != NULL) { char cache_key_buffer[16]; EncodeFixed64(cache_key_buffer, table->rep_->cache_id); EncodeFixed64(cache_key_buffer+8, handle.offset()); Slice key(cache_key_buffer, sizeof(cache_key_buffer)); cache_handle = block_cache->Lookup(key); if (cache_handle != NULL) { block = reinterpret_cast<Block*>(block_cache->Value(cache_handle)); } else { s = ReadBlock(table->rep_->file, options, handle, &block); if (s.ok() && options.fill_cache) { cache_handle = block_cache->Insert( key, block, block->size(), &DeleteCachedBlock); } } } else { s = ReadBlock(table->rep_->file, options, handle, &block); } } Iterator* iter; if (block != NULL) { iter = block->NewIterator(table->rep_->options.comparator); if (cache_handle == NULL) { iter->RegisterCleanup(&DeleteBlock, block, NULL); } else { iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle); } } else { iter = NewErrorIterator(s); } return iter; } Iterator* Table::NewIterator(const ReadOptions& options) const { return NewTwoLevelIterator( rep_->index_block->NewIterator(rep_->options.comparator), &Table::BlockReader, const_cast<Table*>(this), options); } uint64_t Table::ApproximateOffsetOf(const Slice& key) const { Iterator* index_iter = rep_->index_block->NewIterator(rep_->options.comparator); index_iter->Seek(key); uint64_t result; if (index_iter->Valid()) { BlockHandle handle; Slice input = index_iter->value(); Status s = handle.DecodeFrom(&input); if (s.ok()) { result = handle.offset(); } else { // Strange: we can't decode the block handle in the index block. // We'll just return the offset of the metaindex block, which is // close to the whole file size for this case. result = rep_->metaindex_handle.offset(); } } else { // key is past the last key in the file. Approximate the offset // by returning the offset of the metaindex block (which is // right near the end of the file). result = rep_->metaindex_handle.offset(); } delete index_iter; return result; } }
zzxiaogx-leveldb
table/table.cc
C++
bsd
5,258
# -*- coding: utf-8 -*- """ @autor Emilio Ramirez """ class ConnectionDB: """This class manage the connection with the Database""" def __init__(self, host='localhost', port=3306, namedb='', user='', password='', scheme=''): """ Constructor of class @param String host of the dbms @param String name of the database @param String user of the database @param String password of the user on database @param String scheme (only in postgres)""" self._host = host self._port = port self._namedb = namedb self._user = user self._password = password self._scheme = scheme self._tryAgain = True self._connection = None self._cursor = None def connect_mysql(self): """Provide the connection with MySQL db and set the cursor """ try: self.dbapi = __import__('MySQLdb') self._connection = self.dbapi.connect(host=self._host, port=self._port, user=self._user, passwd=self._password, db=self._namedb) self._cursor = self._connection.cursor() except (ImportError): print "Driver MySQL couldn't be loaded." except (self.dbapi.Error), e: print "Error trying to connect:", e def close_connection(self): """Close the cursor and the connection's DBMS""" try: self._cursor.close() self._connection.close() except (Exception), e: print "Error trying close the cursor or connection with db", e def insert_row(self, tablename, data): """Insert a row in a table @param String tablename @param List data""" try: self._cursor.execute("INSERT INTO "+ tablename + "(" + ",".join(data[0]) + ")"\ " VALUES (" + ("%s,"*len(data[1]))[:-1] + ")", data[1]) except (Exception), e: print "insert_row error:",e def update(self, tablenames): """Update a row in a table @param String tablename""" pass def delete(self, tablename, where): """Delete a row in a table @param String tablename @param String where""" try: self._cursor.execute("DELETE FROM "+ tablename +" WHERE "+ where) except (Exception), e: print "delete error:",e def select(self, select): """Query select with the complete string query @param String select return check 
_cursor methods fetchone, fetchmany o fetchall""" try: self._cursor.execute(select) except (Exception), e: print "select error:",e def select_where(self, tablename, where): """Query select with only where clausule @param String tablename @param String where return check _cursor methods fetchone, fetchmany o fetchall""" try: self._cursor.execute("SELECT * FROM " + tablename + " WHERE " + where) except (Exception), e: print "select_where error:",e def commit(self): """Commit the transactions""" try: self._connection.commit() except (Exception), e: print "commit error:",e if __name__ == '__main__': import datetime,random #create the connection conn_db = ConnectionDB(namedb='quickdb', user='quickdb', password='quickdb') conn_db.connect_mysql() #some data test nombres = ["pab","tsb","aed","pdp","gda"] col = ['nombre','fecha', 'salary', 'date'] #~ import datetime val = [ nombres[random.randint(0, 4)], random.randint(1000, 2000), 50.69, datetime.date.today()] data = [col, val] #test insert_row method #conn_db.insert_row('libro', data) #test the select method #~ conn_db.select("SELECT * FROM libro") #~ for tupla in conn_db._cursor.fetchall(): #~ print tupla #test the select_where method #~ conn_db.select_where("libro", "nombre='aed'") #~ for tupla in conn_db._cursor.fetchall(): #~ print tupla #test delete method #conn_db.delete('libro', 'nombre="analisis"') #test the commit method conn_db.commit() #test the close_connection method conn_db.close_connection()
zzqchy-qaw1
python/quickdb/db/connection.py
Python
lgpl
4,338
import re import datetime def is_valid_field(): pass
zzqchy-qaw1
python/quickdb/validation.py
Python
lgpl
58
package quickdb.model; import quickdb.annotation.*; @Table public class Page{ @Column(type=Properties.TYPES.PRIMARYKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11, autoIncrement=true, primary=true) private int id; @Column @ColumnDefinition(type=Definition.DATATYPE.INTEGER) private int pageNumber; public Page(){} public Page(int p){ this.pageNumber = p; } /** * @return the id */ public int getId() { return id; } /** * @param id the id to set */ public void setId(int id) { this.id = id; } /** * @return the pageNumber */ public int getPageNumber() { return pageNumber; } /** * @param pageNumber the pageNumber to set */ public void setPageNumber(int pageNumber) { this.pageNumber = pageNumber; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Page.java
Java
lgpl
893
package quickdb.model; import java.util.ArrayList; public class Pruebas { private int id; private String descripcion; private ArrayList prueba2; public Pruebas(){ } /** * @return the id */ public int getId() { return id; } /** * @param id the id to set */ public void setId(int id) { this.id = id; } /** * @return the descripcion */ public String getDescripcion() { return descripcion; } /** * @param descripcion the descripcion to set */ public void setDescripcion(String descripcion) { this.descripcion = descripcion; } /** * @return the array */ public ArrayList getPrueba2() { return prueba2; } /** * @param array the array to set */ public void setPrueba2(ArrayList array) { this.prueba2 = array; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Pruebas.java
Java
lgpl
918
package quickdb.model; import quickdb.annotation.*; @Table public class Person{ @Column(type=Properties.TYPES.PRIMARYKEY) private int id; @Column(name="name") private String personName; @Column(getter="getPersonAge", setter="setPersonAge") private int age; public Person(){ this.personName = ""; } public Person(String name, int age){ this.id = 0; this.personName = name; this.age = age; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getPersonName() { return personName; } public void setPersonName(String personName) { this.personName = personName; } public int getPersonAge() { return age; } public void setPersonAge(int age) { this.age = age; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Person.java
Java
lgpl
877
package quickdb.model; import quickdb.annotation.*; @Table public class Prueba{ @Column(type=Properties.TYPES.PRIMARYKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11, autoIncrement=true, primary=true) private int id; @Column @ColumnDefinition private String name; public Prueba(){ this.name = ""; } public Prueba(String name){ this.name = name; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Prueba.java
Java
lgpl
670
package quickdb.model; import quickdb.annotation.*; @Table public class PruebaParent { @Column(type = Properties.TYPES.PRIMARYKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11, autoIncrement=true, primary=true) private int id; @Column @ColumnDefinition private String nameParent; @Column @ColumnDefinition(type=Definition.DATATYPE.INTEGER) private int number; public PruebaParent(){ this.nameParent = ""; } /** * @return the id */ public int getId() { return id; } /** * @param id the id to set */ public void setId(int id) { this.id = id; } /** * @return the nameParent */ public String getNameParent() { return nameParent; } /** * @param nameParent the nameParent to set */ public void setNameParent(String nameParent) { this.nameParent = nameParent; } /** * @return the number */ public int getNumber() { return number; } /** * @param number the number to set */ public void setNumber(int number) { this.number = number; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/PruebaParent.java
Java
lgpl
1,187
package quickdb.model; import quickdb.annotation.*; @Parent @Table public class PruebaChild extends PruebaParent{ @Column(type=Properties.TYPES.PRIMARYKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11, autoIncrement=true, primary=true) private int id; @Column @ColumnDefinition private String name; public PruebaChild(){ this.name = ""; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/PruebaChild.java
Java
lgpl
644
package quickdb.model; public class Dog{ private int id; private String name; private String color; private Race idRace; public Dog(){ this.idRace = new Race(); this.name = ""; this.color = ""; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getColor() { return color; } public void setColor(String color) { this.color = color; } public Race getIdRace() { return idRace; } public void setIdRace(Race idRace) { this.idRace = idRace; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Dog.java
Java
lgpl
765
package quickdb.model; import quickdb.annotation.Table; @Table("AlterTable") public class Alter2 { private int id; private String name; private double extend; public void setExtend(double extend) { this.extend = extend; } public void setId(int id) { this.id = id; } public void setName(String name) { this.name = name; } public double getExtend() { return extend; } public int getId() { return id; } public String getName() { return name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Alter2.java
Java
lgpl
561
package quickdb.model; import quickdb.annotation.*; @Table("address") public class Address{ @Column(type=Properties.TYPES.PRIMARYKEY) private int id; @Column @ColumnDefinition private String street; @Column(type=Properties.TYPES.FOREIGNKEY) private District idDistrict; public Address(){ this.street = ""; this.idDistrict = new District(""); } public Address(String street, District district){ this.street = street; this.idDistrict = district; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getStreet() { return street; } public void setStreet(String street) { this.street = street; } public District getIdDistrict() { return idDistrict; } public void setIdDistrict(District idDistrict) { this.idDistrict = idDistrict; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Address.java
Java
lgpl
953
package quickdb.model; import java.util.ArrayList; import quickdb.annotation.*; @Table public class Book{ @Column(type=Properties.TYPES.PRIMARYKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11, autoIncrement=true, primary=true) private int id; @Column @ColumnDefinition private String name; @Column @ColumnDefinition(type=Definition.DATATYPE.INTEGER) private int isbn; @Column(type=Properties.TYPES.COLLECTION) @ColumnDefinition(type=Definition.DATATYPE.INTEGER) private ArrayList page; public Book(){ this.name = ""; this.page = new ArrayList(); } /** * @return the id */ public int getId() { return id; } /** * @param id the id to set */ public void setId(int id) { this.id = id; } /** * @return the name */ public String getName() { return name; } /** * @param name the name to set */ public void setName(String name) { this.name = name; } /** * @return the isbn */ public int getIsbn() { return isbn; } /** * @param isbn the isbn to set */ public void setIsbn(int isbn) { this.isbn = isbn; } /** * @return the pages */ public ArrayList getPage() { return page; } /** * @param pages the pages to set */ public void setPage(ArrayList pages) { this.page = pages; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Book.java
Java
lgpl
1,509
package quickdb.model; import quickdb.annotation.Table; @Table("AlterTable") public class Alter1 { private int id; private String name; public void setId(int id) { this.id = id; } public void setName(String name) { this.name = name; } public int getId() { return id; } public String getName() { return name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Alter1.java
Java
lgpl
392
package quickdb.model; public class Race{ private int id; private String name; public Race(){ this.name = ""; } public Race(String name){ this.name = name; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Race.java
Java
lgpl
446
package quickdb.model; import quickdb.annotation.*; @Table("district") public class District{ @Column(type=Properties.TYPES.PRIMARYKEY) private int id; @Column private String name; public District(String name){ this.name = name; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/District.java
Java
lgpl
511
package quickdb.model; public class Prueba2 { private int id; private String nombre; public Prueba2(){ } /** * @return the id */ public int getId() { return id; } /** * @param id the id to set */ public void setId(int id) { this.id = id; } /** * @return the nombre */ public String getNombre() { return nombre; } /** * @param nombre the nombre to set */ public void setNombre(String nombre) { this.nombre = nombre; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Prueba2.java
Java
lgpl
568
package quickdb.model; import java.sql.Date; public class Primitive { private int id; private int intNumber; private double doubleNumber; private float floatNumber; private String string; private Date date; public void setDate(Date date) { this.date = date; } public void setDoubleNumber(double doubleNumber) { this.doubleNumber = doubleNumber; } public void setFloatNumber(float floatNumber) { this.floatNumber = floatNumber; } public void setId(int id) { this.id = id; } public void setIntNumber(int intNumber) { this.intNumber = intNumber; } public void setString(String string) { this.string = string; } public Date getDate() { return date; } public double getDoubleNumber() { return doubleNumber; } public float getFloatNumber() { return floatNumber; } public int getId() { return id; } public int getIntNumber() { return intNumber; } public String getString() { return string; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/Primitive.java
Java
lgpl
1,119
package quickdb.model; import quickdb.annotation.*; @Table public class SuperPrueba{ @Column(type=Properties.TYPES.PRIMARYKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11, autoIncrement=true, primary=true) private int id; @Column @ColumnDefinition private String name; @Column(type=Properties.TYPES.FOREIGNKEY) @ColumnDefinition(type=Definition.DATATYPE.INT, length=11) private Prueba prueba; public SuperPrueba(){ this.name = ""; this.prueba = new Prueba(); } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } /** * @return the prueba */ public Prueba getPrueba() { return prueba; } /** * @param prueba the prueba to set */ public void setPrueba(Prueba prueba) { this.prueba = prueba; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/model/SuperPrueba.java
Java
lgpl
1,026
package quickdb.binding.model; import quickdb.db.AdminBinding; import java.sql.Date; public class BindingObject extends AdminBinding{ private int id; private String name; private Date birth; private double salary; public void setBirth(Date birth) { this.birth = birth; } public void setId(int id) { this.id = id; } public void setName(String name) { this.name = name; } public void setSalary(double salary) { this.salary = salary; } public Date getBirth() { return birth; } public int getId() { return id; } public String getName() { return name; } public double getSalary() { return salary; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/binding/model/BindingObject.java
Java
lgpl
752
package quickdb.binding; import quickdb.binding.model.BindingObject; public class BindingTest { public BindingTest(){ System.out.println("-----------------"); System.out.println("BindingTest"); System.out.println("testDelete"); this.testDelete(); System.out.println("testModify"); this.testModify(); System.out.println("testObtain"); this.testObtain(); System.out.println("testObtainSelect"); this.testObtainSelect(); System.out.println("testObtainString"); this.testObtainString(); System.out.println("testObtainWhere"); this.testObtainWhere(); System.out.println("testSave"); this.testSave(); System.out.println("testSaveGetIndex"); this.testSaveGetIndex(); } public void testSave(){ BindingObject bind = new BindingObject(); bind.setBirth(new java.sql.Date(104, 4, 20)); bind.setName("quickdb"); bind.setSalary(3000.50); System.out.println(bind.save()); } public void testSaveGetIndex(){ BindingObject bind = new BindingObject(); bind.setBirth(new java.sql.Date(104, 4, 20)); bind.setName("quickdb"); bind.setSalary(3000.50); System.out.println((bind.saveGetIndex() > 0)); } public void testObtain(){ BindingObject bind = new BindingObject(); bind.obtain().If("name").equal("quickdb").find(); System.out.println("quickdb".equalsIgnoreCase(bind.getName())); System.out.println(3000.50 == bind.getSalary()); } public void testObtainWhere(){ BindingObject bind = new BindingObject(); bind.obtainWhere("name = 'quickdb'"); System.out.println("quickdb".equalsIgnoreCase(bind.getName())); System.out.println(3000.50 == bind.getSalary()); } public void testObtainString(){ BindingObject bind = new BindingObject(); bind.obtain("name = 'quickdb'"); System.out.println("quickdb".equalsIgnoreCase(bind.getName())); System.out.println(3000.50 == bind.getSalary()); } public void testObtainSelect(){ BindingObject bind = new BindingObject(); bind.obtainSelect("SELECT * FROM BindingObject WHERE name = 'quickdb'"); System.out.println("quickdb".equalsIgnoreCase(bind.getName())); System.out.println(3000.50 == 
bind.getSalary()); } public void testDelete(){ BindingObject bind = new BindingObject(); bind.setBirth(new java.sql.Date(104, 4, 20)); bind.setName("quickdb2"); bind.setSalary(3000.50); int index = bind.saveGetIndex(); System.out.println( (index > 0) ); System.out.println(bind.obtainWhere("id = "+index)); System.out.println(bind.delete()); } public void testModify(){ BindingObject bind = new BindingObject(); bind.setBirth(new java.sql.Date(104, 4, 20)); bind.setName("quickdb3"); bind.setSalary(3000.50); bind.save(); System.out.println(bind.obtainWhere("name = 'quickdb3'")); bind.setName("quickdb4"); System.out.println(bind.modify()); BindingObject b = new BindingObject(); System.out.println(b.obtainWhere("name = 'quickdb4'")); System.out.println("quickdb4".equalsIgnoreCase(b.getName())); } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/binding/BindingTest.java
Java
lgpl
3,406
package quickdb.query.model; public class AnotherClass extends AnotherParent{ private int id; private String property; public void setProperty(String property) { this.property = property; } public String getProperty() { return property; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/AnotherClass.java
Java
lgpl
286
package quickdb.query.model; public class ReferenceQuery extends ReferenceParent{ private int id; private String value; public void setValue(String value) { this.value = value; } public String getValue() { return value; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/ReferenceQuery.java
Java
lgpl
269
package quickdb.query.model; /** * * @author Diego Sarmentero */ public class QueryWithSubquery { private int id; private String name; private ObjectSubquery subQuery; public void setId(int id) { this.id = id; } public void setName(String name) { this.name = name; } public int getId() { return id; } public String getName() { return name; } public void setSubQuery(ObjectSubquery subQuery) { this.subQuery = subQuery; } public ObjectSubquery getSubQuery() { return subQuery; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/QueryWithSubquery.java
Java
lgpl
600
package quickdb.query.model; import quickdb.annotation.Column; /** * * @author Diego Sarmentero */ public class ObjectSummary { private int id; private int value; //THIS @Column(summary="+value") private double summmary; @Column(name="accountSalary") private double salary; //THIS @Column(summary="%salary") private double promSalary; @Column(summary=">value") private double maxValue; @Column(summary="<value") private double minValue; @Column(summary="#value") private double countValue; public void setCountValue(double countValue) { this.countValue = countValue; } public void setId(int id) { this.id = id; } public void setMaxValue(double maxValue) { this.maxValue = maxValue; } public void setMinValue(double minValue) { this.minValue = minValue; } public void setPromSalary(double promSalary) { this.promSalary = promSalary; } public void setSalary(double salary) { this.salary = salary; } public void setSummmary(double summmary) { this.summmary = summmary; } public void setValue(int value) { this.value = value; } public double getCountValue() { return countValue; } public int getId() { return id; } public double getMaxValue() { return maxValue; } public double getMinValue() { return minValue; } public double getPromSalary() { return promSalary; } public double getSalary() { return salary; } public double getSummmary() { return summmary; } public int getValue() { return value; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/ObjectSummary.java
Java
lgpl
1,733
package quickdb.query.model; import java.sql.Date; public class CompleteQuery { private int id; private String name; private double salary; private int age; private Date birth; private boolean cond; public void setAge(int age) { this.age = age; } public int getAge() { return age; } public void setCond(boolean cond) { this.cond = cond; } public boolean getCond() { return cond; } public void setBirth(Date birth) { this.birth = birth; } public void setId(int id) { this.id = id; } public void setName(String name) { this.name = name; } public void setSalary(double salary) { this.salary = salary; } public Date getBirth() { return birth; } public int getId() { return id; } public String getName() { return name; } public double getSalary() { return salary; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/CompleteQuery.java
Java
lgpl
995
package quickdb.query.model; public class AnotherParent { private int id; private double mount; private String description; public void setDescription(String description) { this.description = description; } public void setId(int id) { this.id = id; } public void setMount(double mount) { this.mount = mount; } public String getDescription() { return description; } public int getId() { return id; } public double getMount() { return mount; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/AnotherParent.java
Java
lgpl
561
package quickdb.query.model; public class ReferenceParent { private int id; private String valueParent; public void setId(int id) { this.id = id; } public void setValueParent(String valueParent) { this.valueParent = valueParent; } public int getId() { return id; } public String getValueParent() { return valueParent; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/ReferenceParent.java
Java
lgpl
401
package quickdb.query.model; /** * * @author Diego Sarmentero */ public class ObjectSubquery { private int id; private String value; public ObjectSubquery(String value) { this.value = value; } public ObjectSubquery() { } public void setId(int id) { this.id = id; } public void setValue(String value) { this.value = value; } public int getId() { return id; } public String getValue() { return value; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/ObjectSubquery.java
Java
lgpl
511
package quickdb.query.model; public class UserQuery extends UserParent{ private int id; private String name; public void setId(int id) { this.id = id; } public void setName(String name) { this.name = name; } public int getId() { return id; } public String getName() { return name; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/UserQuery.java
Java
lgpl
364
package quickdb.query.model; public class UserParent { private int id; private String description; private ReferenceQuery reference; public void setDescription(String description) { this.description = description; } public void setId(int id) { this.id = id; } public void setReference(ReferenceQuery reference) { this.reference = reference; } public String getDescription() { return description; } public int getId() { return id; } public ReferenceQuery getReference() { return reference; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/UserParent.java
Java
lgpl
610
package quickdb.query.model; import java.util.ArrayList; /** * * @author Diego Sarmentero */ public class QueryWithCollection { private int id; private String description; private ArrayList<QueryCollection> queryCollection; public void setDescription(String description) { this.description = description; } public void setId(int id) { this.id = id; } public String getDescription() { return description; } public int getId() { return id; } public void setQueryCollection(ArrayList<QueryCollection> queryCollection) { this.queryCollection = queryCollection; } public ArrayList<QueryCollection> getQueryCollection() { return queryCollection; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/QueryWithCollection.java
Java
lgpl
765
package quickdb.query.model; /** * * @author Diego Sarmentero */ public class QueryCollection { private int id; private int value; public QueryCollection() { } public QueryCollection(int value) { this.value = value; } public void setId(int id) { this.id = id; } public void setValue(int value) { this.value = value; } public int getId() { return id; } public int getValue() { return value; } }
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/model/QueryCollection.java
Java
lgpl
502
package quickdb.query;

import quickdb.db.AdminBase;
import quickdb.query.model.AnotherClass;
import quickdb.query.model.AnotherParent;
import quickdb.query.model.CompleteQuery;
import quickdb.query.model.ObjectSubquery;
import quickdb.query.model.ObjectSummary;
import quickdb.query.model.QueryCollection;
import quickdb.query.model.QueryWithCollection;
import quickdb.query.model.QueryWithSubquery;
import quickdb.query.model.ReferenceQuery;
import quickdb.query.model.UserQuery;
import java.sql.Date;
import java.util.ArrayList;

/**
 * Manual smoke-test driver for the quickdb fluent query builder.
 *
 * Each test method prints a boolean to stdout; "true" means the check
 * passed. The constructor seeds the database (when needed) and then runs
 * every test method in sequence — there is no test framework involved.
 */
public class QueryTest {

    // Persistence facade used for saving entities and building queries.
    private AdminBase admin;

    /**
     * Seeds the test tables and runs all query tests.
     *
     * @param admin the persistence facade; also used to drop/create tables
     */
    public QueryTest(AdminBase admin){
        this.admin = admin;
        System.out.println("--------------------------");
        System.out.println("QueryTest");
        // NOTE(review): the setup block runs only when at least one table is
        // MISSING (negated conjunction). When all tables already exist the
        // seed data from a previous run is reused; the DROPs below may target
        // tables that don't exist yet — presumably executeQuery tolerates
        // that. Confirm this inversion is intentional.
        if( !(this.admin.checkTableExist("UserQuery") &&
                this.admin.checkTableExist("UserParent") &&
                this.admin.checkTableExist("AnotherClass") &&
                this.admin.checkTableExist("AnotherParent") &&
                this.admin.checkTableExist("ReferenceQuery") &&
                this.admin.checkTableExist("ReferenceParent") &&
                this.admin.checkTableExist("CompleteQuery")) ){
            this.admin.executeQuery("DROP TABLE UserQuery");
            this.admin.executeQuery("DROP TABLE UserParent");
            this.admin.executeQuery("DROP TABLE AnotherClass");
            this.admin.executeQuery("DROP TABLE AnotherParent");
            this.admin.executeQuery("DROP TABLE ReferenceQuery");
            this.admin.executeQuery("DROP TABLE ReferenceParent");
            this.admin.executeQuery("DROP TABLE CompleteQuery");

            // Seed: two UserQuery rows, each with its own ReferenceQuery.
            UserQuery user = new UserQuery();
            user.setDescription("parent description");
            user.setName("son name");
            ReferenceQuery reference = new ReferenceQuery();
            reference.setValue("son value");
            reference.setValueParent("value parent");
            user.setReference(reference);
            this.admin.save(user);

            UserQuery user2 = new UserQuery();
            user2.setDescription("parent description2");
            user2.setName("son name2");
            ReferenceQuery reference2 = new ReferenceQuery();
            reference2.setValue("son value2");
            reference2.setValueParent("value parent2");
            user2.setReference(reference2);
            this.admin.save(user2);

            // Seed: one AnotherClass row sharing field values with user,
            // so cross-class join-style queries have something to match.
            AnotherClass another = new AnotherClass();
            another.setMount(55.35);
            another.setProperty("son name");
            another.setDescription("parent description");
            this.admin.save(another);

            // Seed: two CompleteQuery rows (name/salary/age/birth/cond)
            // used by the comparison, date, group and sort tests.
            // NOTE(review): java.sql.Date(int,int,int) is the deprecated
            // year-1900/zero-based-month constructor — 85,9,2 is 1985-10-02
            // and 104,4,20 is 2004-05-20.
            CompleteQuery query = new CompleteQuery();
            query.setName("diego sarmentero");
            query.setSalary(3500.50);
            query.setAge(20);
            query.setBirth(new java.sql.Date(85, 9, 2));
            query.setCond(true);
            this.admin.save(query);

            CompleteQuery query2 = new CompleteQuery();
            query2.setName("cat");
            query2.setAge(24);
            query2.setSalary(1500.50);
            query2.setBirth(new java.sql.Date(104, 4, 20));
            query2.setCond(false);
            this.admin.save(query2);
        }

        // Run every test, announcing each by name first.
        System.out.println("testBetweenAndLower");
        this.testBetweenAndLower();
        System.out.println("testDateDayLower");
        this.testDateDayLower();
        System.out.println("testDateDifferenceWith");
        this.testDateDifferenceWith();
        System.out.println("testDateMonthEqual");
        this.testDateMonthEqual();
        System.out.println("testGroupHaving");
        this.testGroupHaving();
        System.out.println("testInOrMatch");
        this.testInOrMatch();
        System.out.println("testIsNotNullAndGreater");
        this.testIsNotNullAndGreater();
        System.out.println("testMatchOrNotEqual");
        this.testMatchOrNotEqual();
        System.out.println("testQueryIfGroupWithSummary");
        this.testQueryIfGroupWithSummary();
        System.out.println("testQueryWithCollection");
        this.testQueryWithCollection();
        System.out.println("testRelatedWithOtherClassInheritanceObtain");
        this.testRelatedWithOtherClassInheritanceObtain();
        System.out.println("testRelatedWithOtherClassObtain");
        this.testRelatedWithOtherClassObtain();
        System.out.println("testSimpleInheritanceObtain");
        this.testSimpleInheritanceObtain();
        System.out.println("testSimpleObtain");
        this.testSimpleObtain();
        System.out.println("testSort");
        this.testSort();
        System.out.println("testSubQuery");
        this.testSubQuery();
        System.out.println("testSummaryAttributes");
        this.testSummaryAttributes();
        System.out.println("testWithReferenceInheritanceObtain");
        this.testWithReferenceInheritanceObtain();
        System.out.println("testWithReferenceObtain");
        this.testWithReferenceObtain();
    }

    /** Simplest case: query by an attribute declared on the class itself. */
    public void testSimpleObtain(){
        //single case: attribute from class
        UserQuery user = new UserQuery();
        admin.obtain(user).If("name").equal("son name").find();
        System.out.println("son name".equalsIgnoreCase(user.getName()));
    }

    /** Query by an attribute inherited from the parent class. */
    public void testSimpleInheritanceObtain(){
        UserQuery user = new UserQuery();
        admin.obtain(user).If("description").equal("parent description2").find();
        System.out.println("son name2".equalsIgnoreCase(user.getName()));
    }

    /** Query by an attribute of the referenced entity (ReferenceQuery). */
    public void testWithReferenceObtain(){
        UserQuery user = new UserQuery();
        admin.obtain(user).If("value", ReferenceQuery.class).equal("son value").find();
        System.out.println("parent description".equalsIgnoreCase(user.getDescription()));
    }

    /** Query by an attribute inherited within the referenced entity. */
    public void testWithReferenceInheritanceObtain(){
        UserQuery user = new UserQuery();
        // NOTE(review): the seed stores "value parent" but the filter uses
        // "value Parent" — presumably relies on case-insensitive SQL
        // comparison; confirm against the target database collation.
        admin.obtain(user).If("valueParent", ReferenceQuery.class).equal("value Parent").find();
        System.out.println("son value".equalsIgnoreCase(user.getReference().getValue()));
    }

    /** Cross-class condition: compares columns against AnotherClass, both orders. */
    public void testRelatedWithOtherClassObtain(){
        UserQuery user = new UserQuery();
        admin.obtain(user).If("property", AnotherClass.class).equal("son name").
                and("name").equal("property", AnotherClass.class).find();
        System.out.println("son value".equalsIgnoreCase(user.getReference().getValue()));

        // Same query with the condition operands reversed.
        UserQuery user3 = new UserQuery();
        admin.obtain(user3).If("name").equal("property", AnotherClass.class).
                and("property", AnotherClass.class).equal("son name").find();
        System.out.println("son value".equalsIgnoreCase(user3.getReference().getValue()));
    }

    /** Cross-class condition against an attribute of AnotherClass's parent. */
    public void testRelatedWithOtherClassInheritanceObtain(){
        UserQuery user = new UserQuery();
        admin.obtain(user).If("description").equal("description", AnotherParent.class).
                and("description", AnotherParent.class).equal("parent description").find();
        System.out.println("son value".equalsIgnoreCase(user.getReference().getValue()));
    }

    /** MATCH (LIKE-style) combined with OR and a negated equality; expects both rows. */
    public void testMatchOrNotEqual(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("name").
                match("diego sarmentero").or("cond").notEqual(true).findAll();
        System.out.println(2 == array.size());
    }

    /** Date range (BETWEEN) combined with a numeric lower-than; expects one row. */
    public void testBetweenAndLower(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("birth").inRange("1980-01-01", "2010-12-31").
                and("salary").lower(2000).findAll();
        System.out.println(1 == array.size());
    }

    /** IN-list combined with OR + MATCH; expects both rows. */
    public void testInOrMatch(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("age").in(22, 23, 24, 25).
                or("name").match("sarmentero").findAll();
        System.out.println(2 == array.size());
    }

    /** IS NOT NULL combined with >=; expects one row (salary 3500.50). */
    public void testIsNotNullAndGreater(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("name").isNotNull().
                and("salary").equalORgreater(2000).findAll();
        System.out.println(1 == array.size());
    }

    /** Ascending sort by salary; "cat" (1500.50) should come first. */
    public void testSort(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).sort(true, "salary").findAll();
        System.out.println("cat".equalsIgnoreCase(((CompleteQuery)array.get(0)).getName()));
    }

    /** GROUP BY salary with a HAVING-style condition; expects one group. */
    public void testGroupHaving(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("age").greater(10).
                group("salary").ifGroup("salary").greater(2000).findAll();
        System.out.println(1 == array.size());
    }

    /** Date function: MONTH(birth) = 5 matches only the 2004-05-20 row. */
    public void testDateMonthEqual(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("birth").date().month().equal(5).findAll();
        System.out.println(1 == array.size());
        System.out.println("cat".equalsIgnoreCase(((CompleteQuery)array.get(0)).getName()));
    }

    /** Date function: DAY(birth) < 21 matches both rows (days 2 and 20). */
    public void testDateDayLower(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("birth").date().
                day().lower(21).findAll();
        System.out.println(2 == array.size());
    }

    /** Date difference against a reference date, in both compact and ISO form. */
    public void testDateDifferenceWith(){
        CompleteQuery query = new CompleteQuery();
        ArrayList array = admin.obtain(query).If("birth").date().
                differenceWith("20040522").equal(2).findAll();
        System.out.println(1 == array.size());

        // Same comparison using java.sql.Date.toString() (yyyy-mm-dd) as input.
        java.sql.Date date = new Date(104, 4, 22);
        array = admin.obtain(query).If("birth").date().
                differenceWith(date.toString()).equal(2).findAll();
        System.out.println(1 == array.size());
    }

    /** Query by an element of an owned collection; the full collection must load. */
    public void testQueryWithCollection(){
        QueryWithCollection with = new QueryWithCollection();
        with.setDescription("description");
        ArrayList<QueryCollection> qu = new ArrayList<QueryCollection>();
        qu.add(new QueryCollection(11));
        qu.add(new QueryCollection(22));
        qu.add(new QueryCollection(33));
        with.setQueryCollection(qu);
        System.out.println(admin.save(with));

        QueryWithCollection with2 = new QueryWithCollection();
        with2.setDescription("description2");
        ArrayList<QueryCollection> qu2 = new ArrayList<QueryCollection>();
        qu2.add(new QueryCollection(44));
        qu2.add(new QueryCollection(55));
        qu2.add(new QueryCollection(66));
        with2.setQueryCollection(qu2);
        System.out.println(admin.save(with2));

        // Filter on a collection element's attribute; the owner and its
        // whole collection (including element value 33) should come back.
        QueryWithCollection with3 = new QueryWithCollection();
        admin.obtain(with3).If("value", QueryCollection.class, "queryCollection").equal(22).find();
        System.out.println("description".equalsIgnoreCase(with3.getDescription()));
        System.out.println(33 == with3.getQueryCollection().get(2).getValue());
    }

    /** Subquery support: IN (SELECT ...) via For()/closeFor()/in(). */
    public void testSubQuery(){
        QueryWithSubquery sub = new QueryWithSubquery();
        sub.setName("subQuery1");
        sub.setSubQuery(new ObjectSubquery("name1"));
        System.out.println(admin.save(sub));

        // Only sub2 has subquery value equal to its own name, so only it
        // satisfies "subquery value IN (names)".
        QueryWithSubquery sub2 = new QueryWithSubquery();
        sub2.setName("subQuery2");
        sub2.setSubQuery(new ObjectSubquery("subQuery2"));
        System.out.println(admin.save(sub2));

        QueryWithSubquery sub3 = new QueryWithSubquery();
        sub3.setName("subQuery3");
        sub3.setSubQuery(new ObjectSubquery("name3"));
        System.out.println(admin.save(sub3));

        // Subquery as the only condition.
        QueryWithSubquery sub4 = new QueryWithSubquery();
        admin.obtain(sub4).If().For("value", ObjectSubquery.class).closeFor().in("name", QueryWithSubquery.class).find();
        System.out.println("subQuery2".equalsIgnoreCase(sub4.getName()));
        System.out.println("subQuery2".equalsIgnoreCase(sub4.getSubQuery().getValue()));

        // Subquery AND-combined with an ordinary condition.
        QueryWithSubquery sub5 = new QueryWithSubquery();
        admin.obtain(sub5).If("id").greater(0).and().For("value", ObjectSubquery.class).
                closeFor().in("name", QueryWithSubquery.class).find();
        System.out.println("subQuery2".equalsIgnoreCase(sub5.getName()));
        System.out.println("subQuery2".equalsIgnoreCase(sub5.getSubQuery().getValue()));
    }

    /** Aggregate attributes (sum/avg/max/min/count) filled by find() and by raw-where obtain(). */
    public void testSummaryAttributes(){
        // This test always starts from a clean ObjectSummary table.
        if(this.admin.checkTableExist("ObjectSummary")){
            this.admin.executeQuery("DROP TABLE ObjectSummary");
        }
        ObjectSummary summ = new ObjectSummary();
        summ.setValue(23);
        summ.setSalary(100.5);
        System.out.println(admin.save(summ));

        ObjectSummary summ2 = new ObjectSummary();
        summ2.setValue(5);
        summ2.setSalary(500.5);
        System.out.println(admin.save(summ2));

        ObjectSummary summ3 = new ObjectSummary();
        summ3.setValue(34);
        summ3.setSalary(200);
        System.out.println(admin.save(summ3));

        // All three rows match value > 2: sum=62, avg(salary)=267, max=34,
        // min=5, count=3.
        ObjectSummary summ4 = new ObjectSummary();
        admin.obtain(summ4).If("value").greater(2).find();
        System.out.println(62.0 == summ4.getSummmary());
        System.out.println(267.0 == summ4.getPromSalary());
        System.out.println(34.0 == summ4.getMaxValue());
        System.out.println(5.0 == summ4.getMinValue());
        System.out.println(3.0 == summ4.getCountValue());

        // Raw where-string variant: rows with value 23 and 34 match.
        ObjectSummary summ5 = new ObjectSummary();
        admin.obtain(summ5, "value > 10");
        System.out.println(57.0 == summ5.getSummmary());
        System.out.println(150.25 == summ5.getPromSalary());
        System.out.println(34.0 == summ5.getMaxValue());
        System.out.println(23.0 == summ5.getMinValue());
        System.out.println(2.0 == summ5.getCountValue());
    }

    /** Aggregates combined with GROUP BY and a summed HAVING condition. */
    public void testQueryIfGroupWithSummary(){
        // Clean slate, then two rows per distinct value (23, 5, 34).
        if(this.admin.checkTableExist("ObjectSummary")){
            System.out.println(this.admin.executeQuery("DROP TABLE ObjectSummary"));
        }
        ObjectSummary summ = new ObjectSummary();
        summ.setValue(23);
        summ.setSalary(100.5);
        System.out.println(admin.save(summ));

        ObjectSummary summ2 = new ObjectSummary();
        summ2.setValue(5);
        summ2.setSalary(500.5);
        System.out.println(admin.save(summ2));

        ObjectSummary summ3 = new ObjectSummary();
        summ3.setValue(34);
        summ3.setSalary(200);
        System.out.println(admin.save(summ3));

        ObjectSummary summ4 = new ObjectSummary();
        summ4.setValue(23);
        summ4.setSalary(10);
        System.out.println(admin.save(summ4));

        ObjectSummary summ5 = new ObjectSummary();
        summ5.setValue(5);
        summ5.setSalary(55);
        System.out.println(admin.save(summ5));

        ObjectSummary summ6 = new ObjectSummary();
        summ6.setValue(34);
        summ6.setSalary(800);
        System.out.println(admin.save(summ6));

        // "+salary" presumably means SUM(salary) in the HAVING clause —
        // only the value=34 group (200+800=1000) exceeds 900.
        ObjectSummary summ7 = new ObjectSummary();
        admin.obtain(summ7).If("id").greater(0).group("value").ifGroup("+salary").greater(900).find();
        System.out.println(34.0 == summ7.getMaxValue());
        System.out.println(34.0 == summ7.getMinValue());
        System.out.println(2.0 == summ7.getCountValue());
        System.out.println(500.0 == summ7.getPromSalary());
        System.out.println(68.0 == summ7.getSummmary());
    }
}
zzqchy-qaw1
testing/QuickDBTesting/src/main/java/quickdb/query/QueryTest.java
Java
lgpl
15,414