From 4774ed34e52fbe01dd0157c2fd71e7918a6a4ff2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C5=91rinc?=
Date: Fri, 4 Oct 2024 13:38:11 +0200
Subject: [PATCH 01/11] Add benchmark_script.sh
---
benchmark_script.sh | 174 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 174 insertions(+)
create mode 100755 benchmark_script.sh
diff --git a/benchmark_script.sh b/benchmark_script.sh
new file mode 100755
index 000000000000..4503c12ea1ce
--- /dev/null
+++ b/benchmark_script.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+
+set -ex
+
+# Function to print usage
+usage() {
+ echo "Usage: $0 --storage-path /path/to/storage --bench-name bench_name --runs runs --stopatheight stopatheight --dbcache dbcache --printtoconsole printtoconsole --preseed preseed --crash-interval-seconds crash_interval_seconds --commit-list commit_list"
+ exit 1
+}
+
+while [[ "$#" -gt 0 ]]; do
+ case $1 in
+ --storage-path) export STORAGE_PATH="$2"; shift ;;
+ --bench-name) export BASE_NAME="$2"; shift ;;
+ --runs) export RUNS="$2"; shift ;;
+ --stopatheight) export STOP_AT_HEIGHT="$2"; shift ;;
+ --dbcache) export DBCACHE="$2"; shift ;;
+ --printtoconsole) export PRINT_TO_CONSOLE="$2"; shift ;;
+ --preseed) export PRESEED="$2"; shift ;;
+ --crash-interval-seconds) export CRASH_INTERVAL_SECONDS="$2"; shift ;;
+ --commit-list) export COMMIT_LIST="$2"; shift ;;
+ *) echo "Unknown parameter passed: $1"; usage ;;
+ esac
+ shift
+done
+
+if [ -z "$STORAGE_PATH" ] || [ -z "$BASE_NAME" ] || [ -z "$RUNS" ] || [ -z "$STOP_AT_HEIGHT" ] || [ -z "$DBCACHE" ] || [ -z "$PRINT_TO_CONSOLE" ] || [ -z "$PRESEED" ] || [ -z "$CRASH_INTERVAL_SECONDS" ] || [ -z "$COMMIT_LIST" ]; then
+ usage
+fi
+
+START_DATE=$(date +%Y%m%d%H%M%S)
+export DATA_DIR="$STORAGE_PATH/BitcoinData"
+mkdir -p "$DATA_DIR"
+export PROJECT_DIR="$STORAGE_PATH/${BASE_NAME}_${START_DATE}"
+mkdir -p "$PROJECT_DIR"
+
+export LOG_FILE="$PROJECT_DIR/benchmark.log"
+export JSON_FILE="$PROJECT_DIR/benchmark.json"
+
+echo "Storage Path: $STORAGE_PATH" | tee -a "$LOG_FILE"
+echo "Benchmark Name: $BASE_NAME" | tee -a "$LOG_FILE"
+echo "Runs: $RUNS" | tee -a "$LOG_FILE"
+echo "Stop at Height: $STOP_AT_HEIGHT" | tee -a "$LOG_FILE"
+echo "DB Cache: $DBCACHE" | tee -a "$LOG_FILE"
+echo "Print to Console: $PRINT_TO_CONSOLE" | tee -a "$LOG_FILE"
+echo "Preseed: $PRESEED" | tee -a "$LOG_FILE"
+echo "Crash Interval in Seconds: $CRASH_INTERVAL_SECONDS" | tee -a "$LOG_FILE"
+echo "Commit List: $COMMIT_LIST" | tee -a "$LOG_FILE"
+
+prepare_function() {
+ echo "Starting prepare step at commit $COMMIT at $(date)" | tee -a "$LOG_FILE"
+
+ killall bitcoind vmstat || true
+
+ git checkout "$COMMIT" || { echo "Failed to checkout commit $COMMIT" | tee -a "$LOG_FILE"; exit 1; }
+ COMMIT_MSG=$(git log --format=%B -n 1)
+ echo "Preparing commit: $COMMIT: $COMMIT_MSG" | tee -a "$LOG_FILE"
+
+ # Build Bitcoin Core
+ cmake -B build -DWITH_ZSTD=ON -DCMAKE_BUILD_TYPE=Release -DBUILD_UTIL=OFF -DBUILD_TX=OFF -DBUILD_TESTS=OFF -DENABLE_WALLET=OFF -DINSTALL_MAN=OFF
+ cmake --build build -j$(nproc) || { echo "Build failed at commit $COMMIT" | tee -a "$LOG_FILE"; exit 1; }
+
+ # Cleanup data directory and caches
+ rm -rf "$DATA_DIR"/*
+ sync
+ echo 3 > /proc/sys/vm/drop_caches
+ echo "Cleared data directory and dropped caches at commit $COMMIT at $(date)" | tee -a "$LOG_FILE"
+
+ # Preseed bitcoind if option is enabled
+ if [ "$PRESEED" = true ]; then
+ echo "Starting bitcoind with large dbcache for preseed at commit: $COMMIT: '$COMMIT_MSG' at $(date)" | tee -a "$LOG_FILE"
+ ./build/src/bitcoind -datadir="$DATA_DIR" -dbcache=10000 -stopatheight=1 -printtoconsole=1
+ echo "Preseed complete at $(date)" | tee -a "$LOG_FILE"
+ fi
+
+ echo "Finished prepare step at commit $COMMIT at $(date)" | tee -a "$LOG_FILE"
+}
+export -f prepare_function
+
+# Run and crash bitcoind periodically and restart it until it exits successfully
+run_and_crash_bitcoind_periodically() {
+ local DB_CRASH_RATIO=0
+ while true; do
+ # Crash bitcoind at intervals if CRASH_INTERVAL_SECONDS is set
+ if [[ "$CRASH_INTERVAL_SECONDS" -gt 0 ]]; then
+ DB_CRASH_RATIO=2 # 50% chance of crashing
+
+ {
+ sleep "$CRASH_INTERVAL_SECONDS"
+ echo "Killing bitcoind with SIGKILL after $CRASH_INTERVAL_SECONDS seconds." | tee -a "$LOG_FILE"
+ killall -SIGKILL bitcoind || true
+ } &
+ fi
+
+ echo "Starting bitcoind process with commit $COMMIT at $(date)" | tee -a "$LOG_FILE"
+ ./build/src/bitcoind -datadir="$DATA_DIR" -stopatheight="$STOP_AT_HEIGHT" -dbcache="$DBCACHE" -printtoconsole="$PRINT_TO_CONSOLE" -dbcrashratio="$DB_CRASH_RATIO" -maxmempool=5 -blocksonly
+ if [ $? -eq 0 ]; then
+ echo "bitcoind finished successfully with exit code 0" | tee -a "$LOG_FILE"
+ break
+ else
+ echo "bitcoind crashed, restarting..." | tee -a "$LOG_FILE"
+ fi
+ done
+}
+export -f run_and_crash_bitcoind_periodically
+
+run_bitcoind_with_monitoring() {
+ echo "run_bitcoind_with_monitoring:" | tee -a "$LOG_FILE"
+
+ COMMIT_MSG=$(git log --format=%B -n 1)
+ echo "Measuring commit: $COMMIT: '$COMMIT_MSG'" | tee -a "$LOG_FILE"
+
+ # Start vmstat monitoring
+ vmstat 1 > "$PROJECT_DIR/vmstat_${COMMIT}_$(date +%Y%m%d%H%M%S).log" &
+ VMSTAT_PID=$!
+
+ run_and_crash_bitcoind_periodically
+
+ echo "VMSTAT monitoring at commit $COMMIT at $(date)" | tee -a "$LOG_FILE"
+ vmstat -s | tee -a "$LOG_FILE"
+ kill $VMSTAT_PID
+}
+export -f run_bitcoind_with_monitoring
+
+cleanup_function() {
+ echo "cleanup_function:" | tee -a "$LOG_FILE"
+
+ {
+ # Log data directory stats
+ echo "Data directory size after benchmark at commit $COMMIT: $(du -sh "$DATA_DIR" | cut -f1)"
+ echo "Number of files in data directory: $(find "$DATA_DIR" -type f | wc -l)"
+ } | tee -a "$LOG_FILE"
+
+ echo "Starting bitcoind for $COMMIT at $(date)" | tee -a "$LOG_FILE"
+ ./build/src/bitcoind -daemon -datadir="$DATA_DIR" -dbcache="$DBCACHE" -printtoconsole=0 && sleep 10
+
+ {
+ echo "Benchmarking gettxoutsetinfo at $(date)"
+ time ./build/src/bitcoin-cli -datadir="$DATA_DIR" gettxoutsetinfo
+ } 2>&1 | tee -a "$LOG_FILE"
+
+ echo "Stopping bitcoind for $COMMIT at $(date)" | tee -a "$LOG_FILE"
+ ./build/src/bitcoin-cli -datadir="$DATA_DIR" stop && sleep 10
+ killall bitcoind vmstat || true
+
+ echo "Ended $COMMIT: $COMMIT_MSG at $(date)" | tee -a "$LOG_FILE"
+}
+export -f cleanup_function
+
+run_benchmarks() {
+ hyperfine \
+ --shell=bash \
+ --runs "$RUNS" \
+ --show-output \
+ --export-json "$JSON_FILE" \
+ --parameter-list COMMIT "$COMMIT_LIST" \
+ --prepare 'COMMIT={COMMIT} prepare_function' \
+ --cleanup 'COMMIT={COMMIT} cleanup_function' \
+ "COMMIT={COMMIT} run_bitcoind_with_monitoring"
+}
+
+# Example usage of the benchmark script with parameter names
+# ./benchmark_script.sh \
+# --storage-path /mnt/my_storage \
+# --bench-name rocksdb-bench \
+# --runs 1 \
+# --stopatheight 100000 \
+# --dbcache 2000 \
+# --printtoconsole 0 \
+# --preseed true \
+# --crash-interval-seconds 0 \
+# --commit-list "6c180ad76776d1ef21160daecda72bba94fd07ed,d6fc509508c727c8755933293d8e5d06e4421a16,d7ae58fb0275a258966925ba832491efa1b842a1"
+
+run_benchmarks
\ No newline at end of file
From 286a31c2c101d63e65bf868f24263f032ab9472b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C5=91rinc?=
Date: Sun, 29 Sep 2024 23:05:45 +0200
Subject: [PATCH 02/11] Overwrite LevelDB folder with latest from
https://github.com/google/leveldb
---
src/leveldb/.appveyor.yml | 35 -
src/leveldb/.clang-format | 18 -
src/leveldb/.gitignore | 8 -
src/leveldb/.travis.yml | 82 ---
src/leveldb/CMakeLists.txt | 176 +++--
src/leveldb/CONTRIBUTING.md | 49 +-
src/leveldb/README.md | 33 +-
src/leveldb/benchmarks/db_bench.cc | 423 ++++++++----
src/leveldb/benchmarks/db_bench_log.cc | 92 +++
src/leveldb/benchmarks/db_bench_sqlite3.cc | 112 ++--
src/leveldb/benchmarks/db_bench_tree_db.cc | 105 +--
src/leveldb/cmake/leveldbConfig.cmake | 1 -
src/leveldb/cmake/leveldbConfig.cmake.in | 9 +
src/leveldb/db/autocompact_test.cc | 28 +-
src/leveldb/db/builder.cc | 9 +-
src/leveldb/db/c.cc | 17 +-
src/leveldb/db/corruption_test.cc | 80 +--
src/leveldb/db/db_impl.cc | 81 ++-
src/leveldb/db/db_impl.h | 2 +-
src/leveldb/db/db_iter.cc | 4 +-
src/leveldb/db/db_iter.h | 2 +-
src/leveldb/db/db_test.cc | 684 +++++++++++---------
src/leveldb/db/dbformat.cc | 5 +-
src/leveldb/db/dbformat_test.cc | 7 +-
src/leveldb/db/dumpfile.cc | 2 +-
src/leveldb/db/fault_injection_test.cc | 62 +-
src/leveldb/db/filename.cc | 14 +-
src/leveldb/db/filename.h | 3 +-
src/leveldb/db/filename_test.cc | 6 +-
src/leveldb/db/leveldbutil.cc | 12 +-
src/leveldb/db/log_reader.cc | 6 +-
src/leveldb/db/log_reader.h | 4 +-
src/leveldb/db/log_test.cc | 88 ++-
src/leveldb/db/log_writer.cc | 2 +-
src/leveldb/db/log_writer.h | 2 +-
src/leveldb/db/memtable.cc | 5 +-
src/leveldb/db/recovery_test.cc | 87 +--
src/leveldb/db/repair.cc | 11 +-
src/leveldb/db/skiplist.h | 4 +-
src/leveldb/db/skiplist_test.cc | 15 +-
src/leveldb/db/snapshot.h | 2 +-
src/leveldb/db/table_cache.h | 7 +-
src/leveldb/db/version_edit.cc | 3 +-
src/leveldb/db/version_edit.h | 2 +-
src/leveldb/db/version_edit_test.cc | 9 +-
src/leveldb/db/version_set.cc | 39 +-
src/leveldb/db/version_set.h | 6 +-
src/leveldb/db/version_set_test.cc | 35 +-
src/leveldb/db/write_batch_test.cc | 11 +-
src/leveldb/doc/benchmark.html | 16 +-
src/leveldb/doc/impl.md | 4 +-
src/leveldb/doc/index.md | 13 +-
src/leveldb/helpers/memenv/memenv.cc | 22 +-
src/leveldb/helpers/memenv/memenv_test.cc | 134 ++--
src/leveldb/include/leveldb/c.h | 8 +-
src/leveldb/include/leveldb/cache.h | 10 +-
src/leveldb/include/leveldb/db.h | 6 +-
src/leveldb/include/leveldb/env.h | 89 ++-
src/leveldb/include/leveldb/options.h | 11 +-
src/leveldb/include/leveldb/slice.h | 10 +-
src/leveldb/include/leveldb/status.h | 4 -
src/leveldb/include/leveldb/table.h | 2 +-
src/leveldb/include/leveldb/table_builder.h | 2 +-
src/leveldb/issues/issue178_test.cc | 19 +-
src/leveldb/issues/issue200_test.cc | 23 +-
src/leveldb/issues/issue320_test.cc | 16 +-
src/leveldb/port/port_config.h.in | 11 +-
src/leveldb/port/port_example.h | 28 +-
src/leveldb/port/port_stdcxx.h | 74 ++-
src/leveldb/table/block.cc | 27 +-
src/leveldb/table/block.h | 4 +-
src/leveldb/table/block_builder.cc | 3 +-
src/leveldb/table/block_builder.h | 3 +-
src/leveldb/table/filter_block.h | 5 +-
src/leveldb/table/filter_block_test.cc | 12 +-
src/leveldb/table/format.cc | 33 +-
src/leveldb/table/format.h | 3 +-
src/leveldb/table/table.cc | 10 +-
src/leveldb/table/table_builder.cc | 17 +-
src/leveldb/table/table_test.cc | 95 +--
src/leveldb/util/arena_test.cc | 6 +-
src/leveldb/util/bloom_test.cc | 28 +-
src/leveldb/util/cache.cc | 13 +-
src/leveldb/util/cache_test.cc | 28 +-
src/leveldb/util/coding.cc | 10 -
src/leveldb/util/coding.h | 50 +-
src/leveldb/util/coding_test.cc | 9 +-
src/leveldb/util/crc32c.cc | 4 +-
src/leveldb/util/crc32c.h | 4 +-
src/leveldb/util/crc32c_test.cc | 7 +-
src/leveldb/util/env.cc | 20 +-
src/leveldb/util/env_posix.cc | 76 ++-
src/leveldb/util/env_posix_test.cc | 93 +--
src/leveldb/util/env_test.cc | 117 ++--
src/leveldb/util/env_windows.cc | 135 ++--
src/leveldb/util/env_windows_test.cc | 23 +-
src/leveldb/util/hash.cc | 2 +-
src/leveldb/util/hash.h | 4 +-
src/leveldb/util/hash_test.cc | 7 +-
src/leveldb/util/histogram.cc | 24 +-
src/leveldb/util/logging.cc | 14 +-
src/leveldb/util/logging.h | 5 +-
src/leveldb/util/logging_test.cc | 9 +-
src/leveldb/util/no_destructor_test.cc | 9 +-
src/leveldb/util/posix_logger.h | 8 +-
src/leveldb/util/random.h | 2 +-
src/leveldb/util/status.cc | 18 +-
src/leveldb/util/status_test.cc | 7 +-
src/leveldb/util/testharness.cc | 81 ---
src/leveldb/util/testharness.h | 141 ----
src/leveldb/util/testutil.cc | 2 +
src/leveldb/util/testutil.h | 16 +
src/leveldb/util/windows_logger.h | 8 +-
113 files changed, 2181 insertions(+), 2012 deletions(-)
delete mode 100644 src/leveldb/.appveyor.yml
delete mode 100644 src/leveldb/.clang-format
delete mode 100644 src/leveldb/.gitignore
delete mode 100644 src/leveldb/.travis.yml
create mode 100644 src/leveldb/benchmarks/db_bench_log.cc
delete mode 100644 src/leveldb/cmake/leveldbConfig.cmake
create mode 100644 src/leveldb/cmake/leveldbConfig.cmake.in
delete mode 100644 src/leveldb/util/testharness.cc
delete mode 100644 src/leveldb/util/testharness.h
diff --git a/src/leveldb/.appveyor.yml b/src/leveldb/.appveyor.yml
deleted file mode 100644
index c24b17e80508..000000000000
--- a/src/leveldb/.appveyor.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Build matrix / environment variables are explained on:
-# https://www.appveyor.com/docs/appveyor-yml/
-# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
-
-version: "{build}"
-
-environment:
- matrix:
- # AppVeyor currently has no custom job name feature.
- # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
- - JOB: Visual Studio 2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
- CMAKE_GENERATOR: Visual Studio 15 2017
-
-platform:
- - x86
- - x64
-
-configuration:
- - RelWithDebInfo
- - Debug
-
-build_script:
- - git submodule update --init --recursive
- - mkdir build
- - cd build
- - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
- - cmake --version
- - cmake .. -G "%CMAKE_GENERATOR%"
- -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
- - cmake --build . --config "%CONFIGURATION%"
- - cd ..
-
-test_script:
- - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
diff --git a/src/leveldb/.clang-format b/src/leveldb/.clang-format
deleted file mode 100644
index f493f75382cc..000000000000
--- a/src/leveldb/.clang-format
+++ /dev/null
@@ -1,18 +0,0 @@
-# Run manually to reformat a file:
-# clang-format -i --style=file
-# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
-BasedOnStyle: Google
-DerivePointerAlignment: false
-
-# Public headers are in a different location in the internal Google repository.
-# Order them so that when imported to the authoritative repository they will be
-# in correct alphabetical order.
-IncludeCategories:
- - Regex: '^(<|"(benchmarks|db|helpers)/)'
- Priority: 1
- - Regex: '^"(leveldb)/'
- Priority: 2
- - Regex: '^(<|"(issues|port|table|third_party|util)/)'
- Priority: 3
- - Regex: '.*'
- Priority: 4
diff --git a/src/leveldb/.gitignore b/src/leveldb/.gitignore
deleted file mode 100644
index c4b242534fb4..000000000000
--- a/src/leveldb/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-# Editors.
-*.sw*
-.vscode
-.DS_Store
-
-# Build directory.
-build/
-out/
diff --git a/src/leveldb/.travis.yml b/src/leveldb/.travis.yml
deleted file mode 100644
index 42cbe64fd0ed..000000000000
--- a/src/leveldb/.travis.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# Build matrix / environment variables are explained on:
-# http://about.travis-ci.org/docs/user/build-configuration/
-# This file can be validated on: http://lint.travis-ci.org/
-
-language: cpp
-dist: bionic
-osx_image: xcode10.3
-
-compiler:
-- gcc
-- clang
-os:
-- linux
-- osx
-
-env:
-- BUILD_TYPE=Debug
-- BUILD_TYPE=RelWithDebInfo
-
-addons:
- apt:
- sources:
- - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
- key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
- - sourceline: 'ppa:ubuntu-toolchain-r/test'
- packages:
- - clang-9
- - cmake
- - gcc-9
- - g++-9
- - libgoogle-perftools-dev
- - libkyotocabinet-dev
- - libsnappy-dev
- - libsqlite3-dev
- - ninja-build
- homebrew:
- packages:
- - cmake
- - crc32c
- - gcc@9
- - gperftools
- - kyoto-cabinet
- - llvm@9
- - ninja
- - snappy
- - sqlite3
- update: true
-
-install:
-# The following Homebrew packages aren't linked by default, and need to be
-# prepended to the path explicitly.
-- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
- export PATH="$(brew --prefix llvm)/bin:$PATH";
- fi
-# /usr/bin/gcc points to an older compiler on both Linux and macOS.
-- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
-# /usr/bin/clang points to an older compiler on both Linux and macOS.
-#
-# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
-# below don't work on macOS. Fortunately, the path change above makes the
-# default values (clang and clang++) resolve to the correct compiler on macOS.
-- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
- if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
- fi
-- echo ${CC}
-- echo ${CXX}
-- ${CXX} --version
-- cmake --version
-
-before_script:
-- mkdir -p build && cd build
-- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
- -DCMAKE_INSTALL_PREFIX=$HOME/.local
-- cmake --build .
-- cd ..
-
-script:
-- cd build && ctest --verbose && cd ..
-- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
-- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
-- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
-- cd build && cmake --build . --target install
diff --git a/src/leveldb/CMakeLists.txt b/src/leveldb/CMakeLists.txt
index 1cb46256c294..fda9e01bbb68 100644
--- a/src/leveldb/CMakeLists.txt
+++ b/src/leveldb/CMakeLists.txt
@@ -4,17 +4,23 @@
cmake_minimum_required(VERSION 3.9)
# Keep the version below in sync with the one in db.h
-project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
-
-# This project can use C11, but will gracefully decay down to C89.
-set(CMAKE_C_STANDARD 11)
-set(CMAKE_C_STANDARD_REQUIRED OFF)
-set(CMAKE_C_EXTENSIONS OFF)
-
-# This project requires C++11.
-set(CMAKE_CXX_STANDARD 11)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_EXTENSIONS OFF)
+project(leveldb VERSION 1.23.0 LANGUAGES C CXX)
+
+# C standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_C_STANDARD)
+ # This project can use C11, but will gracefully decay down to C89.
+ set(CMAKE_C_STANDARD 11)
+ set(CMAKE_C_STANDARD_REQUIRED OFF)
+ set(CMAKE_C_EXTENSIONS OFF)
+endif(NOT CMAKE_C_STANDARD)
+
+# C++ standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_CXX_STANDARD)
+ # This project requires C++11.
+ set(CMAKE_CXX_STANDARD 11)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+endif(NOT CMAKE_CXX_STANDARD)
if (WIN32)
set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
@@ -28,15 +34,13 @@ option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
-include(TestBigEndian)
-test_big_endian(LEVELDB_IS_BIG_ENDIAN)
-
include(CheckIncludeFile)
check_include_file("unistd.h" HAVE_UNISTD_H)
include(CheckLibraryExists)
check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
+check_library_exists(zstd zstd_compress "" HAVE_ZSTD)
check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)
include(CheckCXXSymbolExists)
@@ -78,6 +82,10 @@ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
+# Used by googletest.
+check_cxx_compiler_flag(-Wno-missing-field-initializers
+ LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
include(CheckCXXSourceCompiles)
# Test whether C++17 __has_include is available.
@@ -266,6 +274,9 @@ endif(HAVE_CRC32C)
if(HAVE_SNAPPY)
target_link_libraries(leveldb snappy)
endif(HAVE_SNAPPY)
+if(HAVE_ZSTD)
+ target_link_libraries(leveldb zstd)
+endif(HAVE_ZSTD)
if(HAVE_TCMALLOC)
target_link_libraries(leveldb tcmalloc)
endif(HAVE_TCMALLOC)
@@ -282,6 +293,77 @@ target_link_libraries(leveldbutil leveldb)
if(LEVELDB_BUILD_TESTS)
enable_testing()
+ # Prevent overriding the parent project's compiler/linker settings on Windows.
+ set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+ set(install_gtest OFF)
+ set(install_gmock OFF)
+ set(build_gmock ON)
+
+ # This project is tested using GoogleTest.
+ add_subdirectory("third_party/googletest")
+
+ # GoogleTest triggers a missing field initializers warning.
+ if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+ set_property(TARGET gtest
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ set_property(TARGET gmock
+ APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
+ endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
+
+ add_executable(leveldb_tests "")
+ target_sources(leveldb_tests
+ PRIVATE
+ # "db/fault_injection_test.cc"
+ # "issues/issue178_test.cc"
+ # "issues/issue200_test.cc"
+ # "issues/issue320_test.cc"
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ # "util/env_test.cc"
+ "util/status_test.cc"
+ "util/no_destructor_test.cc"
+ "util/testutil.cc"
+ "util/testutil.h"
+ )
+ if(NOT BUILD_SHARED_LIBS)
+ target_sources(leveldb_tests
+ PRIVATE
+ "db/autocompact_test.cc"
+ "db/corruption_test.cc"
+ "db/db_test.cc"
+ "db/dbformat_test.cc"
+ "db/filename_test.cc"
+ "db/log_test.cc"
+ "db/recovery_test.cc"
+ "db/skiplist_test.cc"
+ "db/version_edit_test.cc"
+ "db/version_set_test.cc"
+ "db/write_batch_test.cc"
+ "helpers/memenv/memenv_test.cc"
+ "table/filter_block_test.cc"
+ "table/table_test.cc"
+ "util/arena_test.cc"
+ "util/bloom_test.cc"
+ "util/cache_test.cc"
+ "util/coding_test.cc"
+ "util/crc32c_test.cc"
+ "util/hash_test.cc"
+ "util/logging_test.cc"
+ )
+ endif(NOT BUILD_SHARED_LIBS)
+ target_link_libraries(leveldb_tests leveldb gmock gtest gtest_main)
+ target_compile_definitions(leveldb_tests
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions(leveldb_tests
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+ add_test(NAME "leveldb_tests" COMMAND "leveldb_tests")
+
function(leveldb_test test_file)
get_filename_component(test_target_name "${test_file}" NAME_WE)
@@ -289,14 +371,12 @@ if(LEVELDB_BUILD_TESTS)
target_sources("${test_target_name}"
PRIVATE
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
- "util/testharness.cc"
- "util/testharness.h"
"util/testutil.cc"
"util/testutil.h"
"${test_file}"
)
- target_link_libraries("${test_target_name}" leveldb)
+ target_link_libraries("${test_target_name}" leveldb gmock gtest)
target_compile_definitions("${test_target_name}"
PRIVATE
${LEVELDB_PLATFORM_NAME}=1
@@ -312,42 +392,8 @@ if(LEVELDB_BUILD_TESTS)
endfunction(leveldb_test)
leveldb_test("db/c_test.c")
- leveldb_test("db/fault_injection_test.cc")
-
- leveldb_test("issues/issue178_test.cc")
- leveldb_test("issues/issue200_test.cc")
- leveldb_test("issues/issue320_test.cc")
-
- leveldb_test("util/env_test.cc")
- leveldb_test("util/status_test.cc")
- leveldb_test("util/no_destructor_test.cc")
if(NOT BUILD_SHARED_LIBS)
- leveldb_test("db/autocompact_test.cc")
- leveldb_test("db/corruption_test.cc")
- leveldb_test("db/db_test.cc")
- leveldb_test("db/dbformat_test.cc")
- leveldb_test("db/filename_test.cc")
- leveldb_test("db/log_test.cc")
- leveldb_test("db/recovery_test.cc")
- leveldb_test("db/skiplist_test.cc")
- leveldb_test("db/version_edit_test.cc")
- leveldb_test("db/version_set_test.cc")
- leveldb_test("db/write_batch_test.cc")
-
- leveldb_test("helpers/memenv/memenv_test.cc")
-
- leveldb_test("table/filter_block_test.cc")
- leveldb_test("table/table_test.cc")
-
- leveldb_test("util/arena_test.cc")
- leveldb_test("util/bloom_test.cc")
- leveldb_test("util/cache_test.cc")
- leveldb_test("util/coding_test.cc")
- leveldb_test("util/crc32c_test.cc")
- leveldb_test("util/hash_test.cc")
- leveldb_test("util/logging_test.cc")
-
# TODO(costan): This test also uses
# "util/env_{posix|windows}_test_helper.h"
if (WIN32)
@@ -359,6 +405,11 @@ if(LEVELDB_BUILD_TESTS)
endif(LEVELDB_BUILD_TESTS)
if(LEVELDB_BUILD_BENCHMARKS)
+ # This project uses Google benchmark for benchmarking.
+ set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+ set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/benchmark")
+
function(leveldb_benchmark bench_file)
get_filename_component(bench_target_name "${bench_file}" NAME_WE)
@@ -368,14 +419,12 @@ if(LEVELDB_BUILD_BENCHMARKS)
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
"util/histogram.cc"
"util/histogram.h"
- "util/testharness.cc"
- "util/testharness.h"
"util/testutil.cc"
"util/testutil.h"
"${bench_file}"
)
- target_link_libraries("${bench_target_name}" leveldb)
+ target_link_libraries("${bench_target_name}" leveldb gmock gtest benchmark)
target_compile_definitions("${bench_target_name}"
PRIVATE
${LEVELDB_PLATFORM_NAME}=1
@@ -443,23 +492,28 @@ if(LEVELDB_INSTALL)
"${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
"${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
"${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
- DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/leveldb"
)
include(CMakePackageConfigHelpers)
+ configure_package_config_file(
+ "cmake/${PROJECT_NAME}Config.cmake.in"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+ INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+ )
write_basic_package_version_file(
- "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
- COMPATIBILITY SameMajorVersion
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
)
install(
EXPORT leveldbTargets
NAMESPACE leveldb::
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
)
install(
FILES
- "cmake/leveldbConfig.cmake"
- "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
)
endif(LEVELDB_INSTALL)
diff --git a/src/leveldb/CONTRIBUTING.md b/src/leveldb/CONTRIBUTING.md
index a74572a59639..3cf27bb40228 100644
--- a/src/leveldb/CONTRIBUTING.md
+++ b/src/leveldb/CONTRIBUTING.md
@@ -1,36 +1,31 @@
-# Contributing
+# How to Contribute
-We'd love to accept your code patches! However, before we can take them, we
-have to jump a couple of legal hurdles.
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
-## Contributor License Agreements
+## Contributor License Agreement
-Please fill out either the individual or corporate Contributor License
-Agreement as appropriate.
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to to see
+your current agreements on file or to sign a new one.
-* If you are an individual writing original source code and you're sure you
-own the intellectual property, then sign an [individual CLA](https://developers.google.com/open-source/cla/individual).
-* If you work for a company that wants to allow you to contribute your work,
-then sign a [corporate CLA](https://developers.google.com/open-source/cla/corporate).
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
-Follow either of the two links above to access the appropriate CLA and
-instructions for how to sign and return it.
+## Code Reviews
-## Submitting a Patch
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
-1. Sign the contributors license agreement above.
-2. Decide which code you want to submit. A submission should be a set of changes
-that addresses one issue in the [issue tracker](https://github.com/google/leveldb/issues).
-Please don't mix more than one logical change per submission, because it makes
-the history hard to follow. If you want to make a change
-(e.g. add a sample or feature) that doesn't have a corresponding issue in the
-issue tracker, please create one.
-3. **Submitting**: When you are ready to submit, send us a Pull Request. Be
-sure to include the issue number you fixed and the name you used to sign
-the CLA.
+See [the README](README.md#contributing-to-the-leveldb-project) for areas
+where we are likely to accept external contributions.
-## Writing Code ##
+## Community Guidelines
-If your contribution contains code, please make sure that it follows
-[the style guide](http://google.github.io/styleguide/cppguide.html).
-Otherwise we will have to ask you to make changes, and that's no fun for anyone.
+This project follows [Google's Open Source Community
+Guidelines](https://opensource.google/conduct/).
\ No newline at end of file
diff --git a/src/leveldb/README.md b/src/leveldb/README.md
index dadfd5693ead..a5e541604df2 100644
--- a/src/leveldb/README.md
+++ b/src/leveldb/README.md
@@ -1,7 +1,11 @@
-**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
+LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.
-[](https://travis-ci.org/google/leveldb)
-[](https://ci.appveyor.com/project/pwnall/leveldb)
+> **This repository is receiving very limited maintenance. We will only review the following types of changes.**
+>
+> * Fixes for critical bugs, such as data loss or memory corruption
+> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update
+
+[](https://github.com/google/leveldb/actions/workflows/build.yml)
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
@@ -14,12 +18,12 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Multiple changes can be made in one atomic batch.
* Users can create a transient snapshot to get a consistent view of data.
* Forward and backward iteration is supported over the data.
- * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/).
+ * Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/), but [Zstd compression](https://facebook.github.io/zstd/) is also supported.
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
# Documentation
- [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
+ [LevelDB library documentation](https://github.com/google/leveldb/blob/main/doc/index.md) is online and bundled with the source code.
# Limitations
@@ -27,6 +31,12 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Only a single process (possibly multi-threaded) can access a particular database at a time.
* There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
+# Getting the Source
+
+```bash
+git clone --recurse-submodules https://github.com/google/leveldb.git
+```
+
# Building
This project supports [CMake](https://cmake.org/) out of the box.
@@ -67,6 +77,11 @@ Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
# Contributing to the leveldb Project
+> **This repository is receiving very limited maintenance. We will only review the following types of changes.**
+>
+> * Bug fixes
+> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update
+
The leveldb project welcomes contributions. leveldb's primary goal is to be
a reliable and fast key/value store. Changes that are in line with the
features/limitations outlined above, and meet the requirements below,
@@ -94,6 +109,12 @@ Contribution requirements:
clang-format -i --style=file
```
+We are unlikely to accept contributions to the build configuration files, such
+as `CMakeLists.txt`. We are focused on maintaining a build configuration that
+allows us to test that the project works in a few supported configurations
+inside Google. We are not currently interested in supporting other requirements,
+such as different operating systems, compilers, or build systems.
+
## Submitting a Pull Request
Before any pull request will be accepted the author must first sign a
@@ -102,7 +123,7 @@ Contributor License Agreement (CLA) at https://cla.developers.google.com/.
In order to keep the commit timeline linear
[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits)
your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase)
-on google/leveldb/master. This keeps the commit timeline linear and more easily sync'ed
+on google/leveldb/main. This keeps the commit timeline linear and more easily sync'ed
with the internal repository at Google. More information at GitHub's
[About Git rebase](https://help.github.com/articles/about-git-rebase/) page.
diff --git a/src/leveldb/benchmarks/db_bench.cc b/src/leveldb/benchmarks/db_bench.cc
index 3696023b702f..8e3f4e7b1281 100644
--- a/src/leveldb/benchmarks/db_bench.cc
+++ b/src/leveldb/benchmarks/db_bench.cc
@@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include
-#include
#include
+#include
+#include
+#include
+
#include "leveldb/cache.h"
+#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
@@ -33,6 +36,7 @@
// readmissing -- read N missing keys in random order
// readhot -- read N times in random order from 1% section of DB
// seekrandom -- N random seeks
+// seekordered -- N ordered seeks
// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
// Meta operations:
@@ -56,7 +60,9 @@ static const char* FLAGS_benchmarks =
"fill100K,"
"crc32c,"
"snappycomp,"
- "snappyuncomp,";
+ "snappyuncomp,"
+ "zstdcomp,"
+ "zstduncomp,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
@@ -77,6 +83,9 @@ static double FLAGS_compression_ratio = 0.5;
// Print histogram of operation timings
static bool FLAGS_histogram = false;
+// Count the number of string comparisons performed
+static bool FLAGS_comparisons = false;
+
// Number of bytes to buffer in memtable before compacting
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;
@@ -100,6 +109,9 @@ static int FLAGS_open_files = 0;
// Negative means use default settings.
static int FLAGS_bloom_bits = -1;
+// Common key prefix length.
+static int FLAGS_key_prefix = 0;
+
// If true, do not destroy the existing database. If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
@@ -108,14 +120,47 @@ static bool FLAGS_use_existing_db = false;
// If true, reuse existing log/MANIFEST files when re-opening a database.
static bool FLAGS_reuse_logs = false;
+// If true, use compression.
+static bool FLAGS_compression = true;
+
// Use the db with the following name.
static const char* FLAGS_db = nullptr;
+// ZSTD compression level to try out
+static int FLAGS_zstd_compression_level = 1;
+
namespace leveldb {
namespace {
leveldb::Env* g_env = nullptr;
+class CountComparator : public Comparator {
+ public:
+ CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {}
+ ~CountComparator() override {}
+ int Compare(const Slice& a, const Slice& b) const override {
+ count_.fetch_add(1, std::memory_order_relaxed);
+ return wrapped_->Compare(a, b);
+ }
+ const char* Name() const override { return wrapped_->Name(); }
+ void FindShortestSeparator(std::string* start,
+ const Slice& limit) const override {
+ wrapped_->FindShortestSeparator(start, limit);
+ }
+
+ void FindShortSuccessor(std::string* key) const override {
+ return wrapped_->FindShortSuccessor(key);
+ }
+
+ size_t comparisons() const { return count_.load(std::memory_order_relaxed); }
+
+ void reset() { count_.store(0, std::memory_order_relaxed); }
+
+ private:
+ mutable std::atomic count_{0};
+ const Comparator* const wrapped_;
+};
+
// Helper for quickly generating random data.
class RandomGenerator {
private:
@@ -148,6 +193,26 @@ class RandomGenerator {
}
};
+class KeyBuffer {
+ public:
+ KeyBuffer() {
+ assert(FLAGS_key_prefix < sizeof(buffer_));
+ memset(buffer_, 'a', FLAGS_key_prefix);
+ }
+ KeyBuffer& operator=(KeyBuffer& other) = delete;
+ KeyBuffer(KeyBuffer& other) = delete;
+
+ void Set(int k) {
+ std::snprintf(buffer_ + FLAGS_key_prefix,
+ sizeof(buffer_) - FLAGS_key_prefix, "%016d", k);
+ }
+
+ Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); }
+
+ private:
+ char buffer_[1024];
+};
+
#if defined(__linux)
static Slice TrimSpace(Slice s) {
size_t start = 0;
@@ -220,8 +285,8 @@ class Stats {
double micros = now - last_op_finish_;
hist_.Add(micros);
if (micros > 20000) {
- fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
- fflush(stderr);
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
}
last_op_finish_ = now;
}
@@ -242,8 +307,8 @@ class Stats {
next_report_ += 50000;
else
next_report_ += 100000;
- fprintf(stderr, "... finished %d ops%30s\r", done_, "");
- fflush(stderr);
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
}
}
@@ -260,18 +325,20 @@ class Stats {
// elapsed times.
double elapsed = (finish_ - start_) * 1e-6;
char rate[100];
- snprintf(rate, sizeof(rate), "%6.1f MB/s",
- (bytes_ / 1048576.0) / elapsed);
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ (bytes_ / 1048576.0) / elapsed);
extra = rate;
}
AppendWithSpace(&extra, message_);
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
- seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), seconds_ * 1e6 / done_,
+ (extra.empty() ? "" : " "), extra.c_str());
if (FLAGS_histogram) {
- fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
}
- fflush(stdout);
+ std::fflush(stdout);
}
};
@@ -302,9 +369,60 @@ struct ThreadState {
Stats stats;
SharedState* shared;
- ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
+ ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {}
};
+void Compress(
+ ThreadState* thread, std::string name,
+ std::function compress_func) {
+ RandomGenerator gen;
+ Slice input = gen.Generate(Options().block_size);
+ int64_t bytes = 0;
+ int64_t produced = 0;
+ bool ok = true;
+ std::string compressed;
+ while (ok && bytes < 1024 * 1048576) { // Compress 1G
+ ok = compress_func(input.data(), input.size(), &compressed);
+ produced += compressed.size();
+ bytes += input.size();
+ thread->stats.FinishedSingleOp();
+ }
+
+ if (!ok) {
+ thread->stats.AddMessage("(" + name + " failure)");
+ } else {
+ char buf[100];
+ std::snprintf(buf, sizeof(buf), "(output: %.1f%%)",
+ (produced * 100.0) / bytes);
+ thread->stats.AddMessage(buf);
+ thread->stats.AddBytes(bytes);
+ }
+}
+
+void Uncompress(
+ ThreadState* thread, std::string name,
+ std::function compress_func,
+ std::function uncompress_func) {
+ RandomGenerator gen;
+ Slice input = gen.Generate(Options().block_size);
+ std::string compressed;
+ bool ok = compress_func(input.data(), input.size(), &compressed);
+ int64_t bytes = 0;
+ char* uncompressed = new char[input.size()];
+ while (ok && bytes < 1024 * 1048576) { // Compress 1G
+ ok = uncompress_func(compressed.data(), compressed.size(), uncompressed);
+ bytes += input.size();
+ thread->stats.FinishedSingleOp();
+ }
+ delete[] uncompressed;
+
+ if (!ok) {
+ thread->stats.AddMessage("(" + name + " failure)");
+ } else {
+ thread->stats.AddBytes(bytes);
+ }
+}
+
} // namespace
class Benchmark {
@@ -318,55 +436,61 @@ class Benchmark {
WriteOptions write_options_;
int reads_;
int heap_counter_;
+ CountComparator count_comparator_;
+ int total_thread_count_;
void PrintHeader() {
- const int kKeySize = 16;
+ const int kKeySize = 16 + FLAGS_key_prefix;
PrintEnvironment();
- fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
- fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n",
- FLAGS_value_size,
- static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
- fprintf(stdout, "Entries: %d\n", num_);
- fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast(kKeySize + FLAGS_value_size) * num_) /
- 1048576.0));
- fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
- 1048576.0));
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ std::fprintf(
+ stdout, "Values: %d bytes each (%d bytes after compression)\n",
+ FLAGS_value_size,
+ static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
+ std::fprintf(
+ stdout, "FileSize: %.1f MB (estimated)\n",
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
- fprintf(stdout, "------------------------------------------------\n");
+ std::fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(
+ std::fprintf(
stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
- fprintf(stdout,
- "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
// See if snappy is working by attempting to compress a compressible string
const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
std::string compressed;
if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
- fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
+ std::fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
} else if (compressed.size() >= sizeof(text)) {
- fprintf(stdout, "WARNING: Snappy compression is not effective\n");
+ std::fprintf(stdout, "WARNING: Snappy compression is not effective\n");
}
}
void PrintEnvironment() {
- fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
- kMinorVersion);
+ std::fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
+ kMinorVersion);
#if defined(__linux)
time_t now = time(nullptr);
- fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
- FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
@@ -386,9 +510,9 @@ class Benchmark {
cache_size = val.ToString();
}
}
- fclose(cpuinfo);
- fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
- fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ std::fclose(cpuinfo);
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
}
#endif
}
@@ -404,12 +528,14 @@ class Benchmark {
value_size_(FLAGS_value_size),
entries_per_batch_(1),
reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- heap_counter_(0) {
+ heap_counter_(0),
+ count_comparator_(BytewiseComparator()),
+ total_thread_count_(0) {
std::vector files;
g_env->GetChildren(FLAGS_db, &files);
for (size_t i = 0; i < files.size(); i++) {
if (Slice(files[i]).starts_with("heap-")) {
- g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
+ g_env->RemoveFile(std::string(FLAGS_db) + "/" + files[i]);
}
}
if (!FLAGS_use_existing_db) {
@@ -487,6 +613,8 @@ class Benchmark {
method = &Benchmark::ReadMissing;
} else if (name == Slice("seekrandom")) {
method = &Benchmark::SeekRandom;
+ } else if (name == Slice("seekordered")) {
+ method = &Benchmark::SeekOrdered;
} else if (name == Slice("readhot")) {
method = &Benchmark::ReadHot;
} else if (name == Slice("readrandomsmall")) {
@@ -507,6 +635,10 @@ class Benchmark {
method = &Benchmark::SnappyCompress;
} else if (name == Slice("snappyuncomp")) {
method = &Benchmark::SnappyUncompress;
+ } else if (name == Slice("zstdcomp")) {
+ method = &Benchmark::ZstdCompress;
+ } else if (name == Slice("zstduncomp")) {
+ method = &Benchmark::ZstdUncompress;
} else if (name == Slice("heapprofile")) {
HeapProfile();
} else if (name == Slice("stats")) {
@@ -515,14 +647,15 @@ class Benchmark {
PrintStats("leveldb.sstables");
} else {
if (!name.empty()) { // No error message for empty name
- fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
}
}
if (fresh_db) {
if (FLAGS_use_existing_db) {
- fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
- name.ToString().c_str());
+ std::fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
+ name.ToString().c_str());
method = nullptr;
} else {
delete db_;
@@ -583,7 +716,11 @@ class Benchmark {
arg[i].bm = this;
arg[i].method = method;
arg[i].shared = &shared;
- arg[i].thread = new ThreadState(i);
+ ++total_thread_count_;
+ // Seed the thread's random state deterministically based upon thread
+ // creation across all benchmarks. This ensures that the seeds are unique
+ // but reproducible when rerunning the same set of benchmarks.
+ arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_);
arg[i].thread->shared = &shared;
g_env->StartThread(ThreadBody, &arg[i]);
}
@@ -604,6 +741,11 @@ class Benchmark {
arg[0].thread->stats.Merge(arg[i].thread->stats);
}
arg[0].thread->stats.Report(name);
+ if (FLAGS_comparisons) {
+ fprintf(stdout, "Comparisons: %zu\n", count_comparator_.comparisons());
+ count_comparator_.reset();
+ fflush(stdout);
+ }
for (int i = 0; i < n; i++) {
delete arg[i].thread;
@@ -624,57 +766,37 @@ class Benchmark {
bytes += size;
}
// Print so result is not dead
- fprintf(stderr, "... crc=0x%x\r", static_cast(crc));
+ std::fprintf(stderr, "... crc=0x%x\r", static_cast(crc));
thread->stats.AddBytes(bytes);
thread->stats.AddMessage(label);
}
void SnappyCompress(ThreadState* thread) {
- RandomGenerator gen;
- Slice input = gen.Generate(Options().block_size);
- int64_t bytes = 0;
- int64_t produced = 0;
- bool ok = true;
- std::string compressed;
- while (ok && bytes < 1024 * 1048576) { // Compress 1G
- ok = port::Snappy_Compress(input.data(), input.size(), &compressed);
- produced += compressed.size();
- bytes += input.size();
- thread->stats.FinishedSingleOp();
- }
-
- if (!ok) {
- thread->stats.AddMessage("(snappy failure)");
- } else {
- char buf[100];
- snprintf(buf, sizeof(buf), "(output: %.1f%%)",
- (produced * 100.0) / bytes);
- thread->stats.AddMessage(buf);
- thread->stats.AddBytes(bytes);
- }
+ Compress(thread, "snappy", &port::Snappy_Compress);
}
void SnappyUncompress(ThreadState* thread) {
- RandomGenerator gen;
- Slice input = gen.Generate(Options().block_size);
- std::string compressed;
- bool ok = port::Snappy_Compress(input.data(), input.size(), &compressed);
- int64_t bytes = 0;
- char* uncompressed = new char[input.size()];
- while (ok && bytes < 1024 * 1048576) { // Compress 1G
- ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
- uncompressed);
- bytes += input.size();
- thread->stats.FinishedSingleOp();
- }
- delete[] uncompressed;
+ Uncompress(thread, "snappy", &port::Snappy_Compress,
+ &port::Snappy_Uncompress);
+ }
- if (!ok) {
- thread->stats.AddMessage("(snappy failure)");
- } else {
- thread->stats.AddBytes(bytes);
- }
+ void ZstdCompress(ThreadState* thread) {
+ Compress(thread, "zstd",
+ [](const char* input, size_t length, std::string* output) {
+ return port::Zstd_Compress(FLAGS_zstd_compression_level, input,
+ length, output);
+ });
+ }
+
+ void ZstdUncompress(ThreadState* thread) {
+ Uncompress(
+ thread, "zstd",
+ [](const char* input, size_t length, std::string* output) {
+ return port::Zstd_Compress(FLAGS_zstd_compression_level, input,
+ length, output);
+ },
+ &port::Zstd_Uncompress);
}
void Open() {
@@ -686,13 +808,18 @@ class Benchmark {
options.write_buffer_size = FLAGS_write_buffer_size;
options.max_file_size = FLAGS_max_file_size;
options.block_size = FLAGS_block_size;
+ if (FLAGS_comparisons) {
+ options.comparator = &count_comparator_;
+ }
options.max_open_files = FLAGS_open_files;
options.filter_policy = filter_policy_;
options.reuse_logs = FLAGS_reuse_logs;
+ options.compression =
+ FLAGS_compression ? kSnappyCompression : kNoCompression;
Status s = DB::Open(options, FLAGS_db, &db_);
if (!s.ok()) {
- fprintf(stderr, "open error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "open error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
@@ -711,7 +838,7 @@ class Benchmark {
void DoWrite(ThreadState* thread, bool seq) {
if (num_ != FLAGS_num) {
char msg[100];
- snprintf(msg, sizeof(msg), "(%d ops)", num_);
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_);
thread->stats.AddMessage(msg);
}
@@ -719,20 +846,20 @@ class Benchmark {
WriteBatch batch;
Status s;
int64_t bytes = 0;
+ KeyBuffer key;
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
- char key[100];
- snprintf(key, sizeof(key), "%016d", k);
- batch.Put(key, gen.Generate(value_size_));
- bytes += value_size_ + strlen(key);
+ const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ batch.Put(key.slice(), gen.Generate(value_size_));
+ bytes += value_size_ + key.slice().size();
thread->stats.FinishedSingleOp();
}
s = db_->Write(write_options_, &batch);
if (!s.ok()) {
- fprintf(stderr, "put error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
thread->stats.AddBytes(bytes);
@@ -768,28 +895,29 @@ class Benchmark {
ReadOptions options;
std::string value;
int found = 0;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
- char key[100];
- const int k = thread->rand.Next() % FLAGS_num;
- snprintf(key, sizeof(key), "%016d", k);
- if (db_->Get(options, key, &value).ok()) {
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ if (db_->Get(options, key.slice(), &value).ok()) {
found++;
}
thread->stats.FinishedSingleOp();
}
char msg[100];
- snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+ std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
thread->stats.AddMessage(msg);
}
void ReadMissing(ThreadState* thread) {
ReadOptions options;
std::string value;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
- char key[100];
- const int k = thread->rand.Next() % FLAGS_num;
- snprintf(key, sizeof(key), "%016d.", k);
- db_->Get(options, key, &value);
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ Slice s = Slice(key.slice().data(), key.slice().size() - 1);
+ db_->Get(options, s, &value);
thread->stats.FinishedSingleOp();
}
}
@@ -798,11 +926,11 @@ class Benchmark {
ReadOptions options;
std::string value;
const int range = (FLAGS_num + 99) / 100;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
- char key[100];
- const int k = thread->rand.Next() % range;
- snprintf(key, sizeof(key), "%016d", k);
- db_->Get(options, key, &value);
+ const int k = thread->rand.Uniform(range);
+ key.Set(k);
+ db_->Get(options, key.slice(), &value);
thread->stats.FinishedSingleOp();
}
}
@@ -810,13 +938,13 @@ class Benchmark {
void SeekRandom(ThreadState* thread) {
ReadOptions options;
int found = 0;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
Iterator* iter = db_->NewIterator(options);
- char key[100];
- const int k = thread->rand.Next() % FLAGS_num;
- snprintf(key, sizeof(key), "%016d", k);
- iter->Seek(key);
- if (iter->Valid() && iter->key() == key) found++;
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ iter->Seek(key.slice());
+ if (iter->Valid() && iter->key() == key.slice()) found++;
delete iter;
thread->stats.FinishedSingleOp();
}
@@ -825,23 +953,42 @@ class Benchmark {
thread->stats.AddMessage(msg);
}
+ void SeekOrdered(ThreadState* thread) {
+ ReadOptions options;
+ Iterator* iter = db_->NewIterator(options);
+ int found = 0;
+ int k = 0;
+ KeyBuffer key;
+ for (int i = 0; i < reads_; i++) {
+ k = (k + (thread->rand.Uniform(100))) % FLAGS_num;
+ key.Set(k);
+ iter->Seek(key.slice());
+ if (iter->Valid() && iter->key() == key.slice()) found++;
+ thread->stats.FinishedSingleOp();
+ }
+ delete iter;
+ char msg[100];
+ std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+ thread->stats.AddMessage(msg);
+ }
+
void DoDelete(ThreadState* thread, bool seq) {
RandomGenerator gen;
WriteBatch batch;
Status s;
+ KeyBuffer key;
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
- char key[100];
- snprintf(key, sizeof(key), "%016d", k);
- batch.Delete(key);
+ const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num));
+ key.Set(k);
+ batch.Delete(key.slice());
thread->stats.FinishedSingleOp();
}
s = db_->Write(write_options_, &batch);
if (!s.ok()) {
- fprintf(stderr, "del error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "del error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
}
@@ -856,6 +1003,7 @@ class Benchmark {
} else {
// Special thread that keeps writing until other threads are done.
RandomGenerator gen;
+ KeyBuffer key;
while (true) {
{
MutexLock l(&thread->shared->mu);
@@ -865,13 +1013,13 @@ class Benchmark {
}
}
- const int k = thread->rand.Next() % FLAGS_num;
- char key[100];
- snprintf(key, sizeof(key), "%016d", k);
- Status s = db_->Put(write_options_, key, gen.Generate(value_size_));
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ Status s =
+ db_->Put(write_options_, key.slice(), gen.Generate(value_size_));
if (!s.ok()) {
- fprintf(stderr, "put error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
@@ -887,7 +1035,7 @@ class Benchmark {
if (!db_->GetProperty(key, &stats)) {
stats = "(failed)";
}
- fprintf(stdout, "\n%s\n", stats.c_str());
+ std::fprintf(stdout, "\n%s\n", stats.c_str());
}
static void WriteToFile(void* arg, const char* buf, int n) {
@@ -896,18 +1044,19 @@ class Benchmark {
void HeapProfile() {
char fname[100];
- snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
+ std::snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db,
+ ++heap_counter_);
WritableFile* file;
Status s = g_env->NewWritableFile(fname, &file);
if (!s.ok()) {
- fprintf(stderr, "%s\n", s.ToString().c_str());
+ std::fprintf(stderr, "%s\n", s.ToString().c_str());
return;
}
bool ok = port::GetHeapProfile(WriteToFile, file);
delete file;
if (!ok) {
- fprintf(stderr, "heap profiling not supported\n");
- g_env->DeleteFile(fname);
+ std::fprintf(stderr, "heap profiling not supported\n");
+ g_env->RemoveFile(fname);
}
}
};
@@ -932,12 +1081,18 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_histogram = n;
+ } else if (sscanf(argv[i], "--comparisons=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_comparisons = n;
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_use_existing_db = n;
} else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_reuse_logs = n;
+ } else if (sscanf(argv[i], "--compression=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_compression = n;
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
@@ -952,6 +1107,8 @@ int main(int argc, char** argv) {
FLAGS_max_file_size = n;
} else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
FLAGS_block_size = n;
+ } else if (sscanf(argv[i], "--key_prefix=%d%c", &n, &junk) == 1) {
+ FLAGS_key_prefix = n;
} else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
FLAGS_cache_size = n;
} else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
@@ -961,8 +1118,8 @@ int main(int argc, char** argv) {
} else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5;
} else {
- fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
- exit(1);
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
}
}
diff --git a/src/leveldb/benchmarks/db_bench_log.cc b/src/leveldb/benchmarks/db_bench_log.cc
new file mode 100644
index 000000000000..a1845bf14cee
--- /dev/null
+++ b/src/leveldb/benchmarks/db_bench_log.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include
+#include
+#include
+
+#include "gtest/gtest.h"
+#include "benchmark/benchmark.h"
+#include "db/version_set.h"
+#include "leveldb/comparator.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/options.h"
+#include "port/port.h"
+#include "util/mutexlock.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+namespace {
+
+std::string MakeKey(unsigned int num) {
+ char buf[30];
+ std::snprintf(buf, sizeof(buf), "%016u", num);
+ return std::string(buf);
+}
+
+void BM_LogAndApply(benchmark::State& state) {
+ const int num_base_files = state.range(0);
+
+ std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
+ DestroyDB(dbname, Options());
+
+ DB* db = nullptr;
+ Options opts;
+ opts.create_if_missing = true;
+ Status s = DB::Open(opts, dbname, &db);
+ ASSERT_LEVELDB_OK(s);
+ ASSERT_TRUE(db != nullptr);
+
+ delete db;
+ db = nullptr;
+
+ Env* env = Env::Default();
+
+ port::Mutex mu;
+ MutexLock l(&mu);
+
+ InternalKeyComparator cmp(BytewiseComparator());
+ Options options;
+ VersionSet vset(dbname, &options, nullptr, &cmp);
+ bool save_manifest;
+ ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
+ VersionEdit vbase;
+ uint64_t fnum = 1;
+ for (int i = 0; i < num_base_files; i++) {
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
+ vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
+ }
+ ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
+
+ uint64_t start_micros = env->NowMicros();
+
+ for (auto st : state) {
+ VersionEdit vedit;
+ vedit.RemoveFile(2, fnum);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
+ vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
+ vset.LogAndApply(&vedit, &mu);
+ }
+
+ uint64_t stop_micros = env->NowMicros();
+ unsigned int us = stop_micros - start_micros;
+ char buf[16];
+ std::snprintf(buf, sizeof(buf), "%d", num_base_files);
+ std::fprintf(stderr,
+ "BM_LogAndApply/%-6s %8" PRIu64
+ " iters : %9u us (%7.0f us / iter)\n",
+ buf, state.iterations(), us, ((float)us) / state.iterations());
+}
+
+BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
+
+} // namespace
+
+} // namespace leveldb
+
+BENCHMARK_MAIN();
diff --git a/src/leveldb/benchmarks/db_bench_sqlite3.cc b/src/leveldb/benchmarks/db_bench_sqlite3.cc
index f183f4fcfdf4..c9be652ad49f 100644
--- a/src/leveldb/benchmarks/db_bench_sqlite3.cc
+++ b/src/leveldb/benchmarks/db_bench_sqlite3.cc
@@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include
-#include
-#include
+
+#include
+#include
#include "util/histogram.h"
#include "util/random.h"
@@ -69,6 +70,9 @@ static int FLAGS_num_pages = 4096;
// benchmark will fail.
static bool FLAGS_use_existing_db = false;
+// If true, the SQLite table has ROWIDs.
+static bool FLAGS_use_rowids = false;
+
// If true, we allow batch writes to occur
static bool FLAGS_transaction = true;
@@ -80,23 +84,23 @@ static const char* FLAGS_db = nullptr;
inline static void ExecErrorCheck(int status, char* err_msg) {
if (status != SQLITE_OK) {
- fprintf(stderr, "SQL error: %s\n", err_msg);
+ std::fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg);
- exit(1);
+ std::exit(1);
}
}
inline static void StepErrorCheck(int status) {
if (status != SQLITE_DONE) {
- fprintf(stderr, "SQL step error: status = %d\n", status);
- exit(1);
+ std::fprintf(stderr, "SQL step error: status = %d\n", status);
+ std::exit(1);
}
}
inline static void ErrorCheck(int status) {
if (status != SQLITE_OK) {
- fprintf(stderr, "sqlite3 error: status = %d\n", status);
- exit(1);
+ std::fprintf(stderr, "sqlite3 error: status = %d\n", status);
+ std::exit(1);
}
}
@@ -178,36 +182,38 @@ class Benchmark {
void PrintHeader() {
const int kKeySize = 16;
PrintEnvironment();
- fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
- fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
- fprintf(stdout, "Entries: %d\n", num_);
- fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast(kKeySize + FLAGS_value_size) * num_) /
- 1048576.0));
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ std::fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
PrintWarnings();
- fprintf(stdout, "------------------------------------------------\n");
+ std::fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(
+ std::fprintf(
stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
- fprintf(stdout,
- "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}
void PrintEnvironment() {
- fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
+ std::fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
#if defined(__linux)
time_t now = time(nullptr);
- fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
- FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
@@ -227,9 +233,9 @@ class Benchmark {
cache_size = val.ToString();
}
}
- fclose(cpuinfo);
- fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
- fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ std::fclose(cpuinfo);
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
}
#endif
}
@@ -250,8 +256,8 @@ class Benchmark {
double micros = (now - last_op_finish_) * 1e6;
hist_.Add(micros);
if (micros > 20000) {
- fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
- fflush(stderr);
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
}
last_op_finish_ = now;
}
@@ -272,8 +278,8 @@ class Benchmark {
next_report_ += 50000;
else
next_report_ += 100000;
- fprintf(stderr, "... finished %d ops%30s\r", done_, "");
- fflush(stderr);
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
}
}
@@ -286,8 +292,8 @@ class Benchmark {
if (bytes_ > 0) {
char rate[100];
- snprintf(rate, sizeof(rate), "%6.1f MB/s",
- (bytes_ / 1048576.0) / (finish - start_));
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ (bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
message_ = std::string(rate) + " " + message_;
} else {
@@ -295,13 +301,14 @@ class Benchmark {
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
- (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
- message_.c_str());
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), (finish - start_) * 1e6 / done_,
+ (message_.empty() ? "" : " "), message_.c_str());
if (FLAGS_histogram) {
- fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
}
- fflush(stdout);
+ std::fflush(stdout);
}
public:
@@ -325,7 +332,7 @@ class Benchmark {
std::string file_name(test_dir);
file_name += "/";
file_name += files[i];
- Env::Default()->DeleteFile(file_name.c_str());
+ Env::Default()->RemoveFile(file_name.c_str());
}
}
}
@@ -401,7 +408,8 @@ class Benchmark {
} else {
known = false;
if (name != Slice()) { // No error message for empty name
- fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
}
}
if (known) {
@@ -421,26 +429,26 @@ class Benchmark {
// Open database
std::string tmp_dir;
Env::Default()->GetTestDirectory(&tmp_dir);
- snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
- tmp_dir.c_str(), db_num_);
+ std::snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+ tmp_dir.c_str(), db_num_);
status = sqlite3_open(file_name, &db_);
if (status) {
- fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
- exit(1);
+ std::fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
+ std::exit(1);
}
// Change SQLite cache size
char cache_size[100];
- snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
- FLAGS_num_pages);
+ std::snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
+ FLAGS_num_pages);
status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// FLAGS_page_size is defaulted to 1024
if (FLAGS_page_size != 1024) {
char page_size[100];
- snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
- FLAGS_page_size);
+ std::snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
+ FLAGS_page_size);
status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
@@ -462,6 +470,7 @@ class Benchmark {
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
std::string create_stmt =
"CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+ if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID";
std::string stmt_array[] = {locking_stmt, create_stmt};
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
for (int i = 0; i < stmt_array_length; i++) {
@@ -487,7 +496,7 @@ class Benchmark {
if (num_entries != num_) {
char msg[100];
- snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
message_ = msg;
}
@@ -534,7 +543,7 @@ class Benchmark {
const int k =
(order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
char key[100];
- snprintf(key, sizeof(key), "%016d", k);
+ std::snprintf(key, sizeof(key), "%016d", k);
// Bind KV values into replace_stmt
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
@@ -607,7 +616,7 @@ class Benchmark {
// Create key value
char key[100];
int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_);
- snprintf(key, sizeof(key), "%016d", k);
+ std::snprintf(key, sizeof(key), "%016d", k);
// Bind key value into read_stmt
status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC);
@@ -678,6 +687,9 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_use_existing_db = n;
+ } else if (sscanf(argv[i], "--use_rowids=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_use_rowids = n;
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
@@ -696,8 +708,8 @@ int main(int argc, char** argv) {
} else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5;
} else {
- fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
- exit(1);
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
}
}
diff --git a/src/leveldb/benchmarks/db_bench_tree_db.cc b/src/leveldb/benchmarks/db_bench_tree_db.cc
index b2f6646d8996..533600b1baba 100644
--- a/src/leveldb/benchmarks/db_bench_tree_db.cc
+++ b/src/leveldb/benchmarks/db_bench_tree_db.cc
@@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include
-#include
-#include
+
+#include
+#include
#include "util/histogram.h"
#include "util/random.h"
@@ -74,7 +75,7 @@ static const char* FLAGS_db = nullptr;
inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
// Synchronize will flush writes to disk
if (!db_->synchronize()) {
- fprintf(stderr, "synchronize error: %s\n", db_->error().name());
+ std::fprintf(stderr, "synchronize error: %s\n", db_->error().name());
}
}
@@ -149,42 +150,47 @@ class Benchmark {
void PrintHeader() {
const int kKeySize = 16;
PrintEnvironment();
- fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
- fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n",
- FLAGS_value_size,
- static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
- fprintf(stdout, "Entries: %d\n", num_);
- fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast(kKeySize + FLAGS_value_size) * num_) /
- 1048576.0));
- fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
- 1048576.0));
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ std::fprintf(
+ stdout, "Values: %d bytes each (%d bytes after compression)\n",
+ FLAGS_value_size,
+ static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
+ std::fprintf(
+ stdout, "FileSize: %.1f MB (estimated)\n",
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
- fprintf(stdout, "------------------------------------------------\n");
+ std::fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(
+ std::fprintf(
stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
- fprintf(stdout,
- "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}
void PrintEnvironment() {
- fprintf(stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n",
- kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
+ std::fprintf(
+ stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n",
+ kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
#if defined(__linux)
time_t now = time(nullptr);
- fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
- FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
@@ -204,9 +210,10 @@ class Benchmark {
cache_size = val.ToString();
}
}
- fclose(cpuinfo);
- fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
- fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ std::fclose(cpuinfo);
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus,
+ cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
}
#endif
}
@@ -227,8 +234,8 @@ class Benchmark {
double micros = (now - last_op_finish_) * 1e6;
hist_.Add(micros);
if (micros > 20000) {
- fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
- fflush(stderr);
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
}
last_op_finish_ = now;
}
@@ -249,8 +256,8 @@ class Benchmark {
next_report_ += 50000;
else
next_report_ += 100000;
- fprintf(stderr, "... finished %d ops%30s\r", done_, "");
- fflush(stderr);
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
}
}
@@ -263,8 +270,8 @@ class Benchmark {
if (bytes_ > 0) {
char rate[100];
- snprintf(rate, sizeof(rate), "%6.1f MB/s",
- (bytes_ / 1048576.0) / (finish - start_));
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ (bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
message_ = std::string(rate) + " " + message_;
} else {
@@ -272,13 +279,14 @@ class Benchmark {
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
- (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
- message_.c_str());
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), (finish - start_) * 1e6 / done_,
+ (message_.empty() ? "" : " "), message_.c_str());
if (FLAGS_histogram) {
- fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
}
- fflush(stdout);
+ std::fflush(stdout);
}
public:
@@ -301,7 +309,7 @@ class Benchmark {
std::string file_name(test_dir);
file_name += "/";
file_name += files[i];
- Env::Default()->DeleteFile(file_name.c_str());
+ Env::Default()->RemoveFile(file_name.c_str());
}
}
}
@@ -309,7 +317,7 @@ class Benchmark {
~Benchmark() {
if (!db_->close()) {
- fprintf(stderr, "close error: %s\n", db_->error().name());
+ std::fprintf(stderr, "close error: %s\n", db_->error().name());
}
}
@@ -373,7 +381,8 @@ class Benchmark {
} else {
known = false;
if (name != Slice()) { // No error message for empty name
- fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
}
}
if (known) {
@@ -392,8 +401,8 @@ class Benchmark {
db_num_++;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
- snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
- test_dir.c_str(), db_num_);
+ std::snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+ test_dir.c_str(), db_num_);
// Create tuning options and open the database
int open_options =
@@ -412,7 +421,7 @@ class Benchmark {
open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
}
if (!db_->open(file_name, open_options)) {
- fprintf(stderr, "open error: %s\n", db_->error().name());
+ std::fprintf(stderr, "open error: %s\n", db_->error().name());
}
}
@@ -432,7 +441,7 @@ class Benchmark {
if (num_entries != num_) {
char msg[100];
- snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
message_ = msg;
}
@@ -440,11 +449,11 @@ class Benchmark {
for (int i = 0; i < num_entries; i++) {
const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
char key[100];
- snprintf(key, sizeof(key), "%016d", k);
+ std::snprintf(key, sizeof(key), "%016d", k);
bytes_ += value_size + strlen(key);
std::string cpp_key = key;
if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) {
- fprintf(stderr, "set error: %s\n", db_->error().name());
+ std::fprintf(stderr, "set error: %s\n", db_->error().name());
}
FinishedSingleOp();
}
@@ -466,7 +475,7 @@ class Benchmark {
for (int i = 0; i < reads_; i++) {
char key[100];
const int k = rand_.Next() % reads_;
- snprintf(key, sizeof(key), "%016d", k);
+ std::snprintf(key, sizeof(key), "%016d", k);
db_->get(key, &value);
FinishedSingleOp();
}
@@ -504,8 +513,8 @@ int main(int argc, char** argv) {
} else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5;
} else {
- fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
- exit(1);
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
}
}
diff --git a/src/leveldb/cmake/leveldbConfig.cmake b/src/leveldb/cmake/leveldbConfig.cmake
deleted file mode 100644
index eea6e5c4776b..000000000000
--- a/src/leveldb/cmake/leveldbConfig.cmake
+++ /dev/null
@@ -1 +0,0 @@
-include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
diff --git a/src/leveldb/cmake/leveldbConfig.cmake.in b/src/leveldb/cmake/leveldbConfig.cmake.in
new file mode 100644
index 000000000000..2572728f61d0
--- /dev/null
+++ b/src/leveldb/cmake/leveldbConfig.cmake.in
@@ -0,0 +1,9 @@
+# Copyright 2019 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+@PACKAGE_INIT@
+
+include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
+
+check_required_components(leveldb)
\ No newline at end of file
diff --git a/src/leveldb/db/autocompact_test.cc b/src/leveldb/db/autocompact_test.cc
index e6c97a05a6b7..69341e3c962c 100644
--- a/src/leveldb/db/autocompact_test.cc
+++ b/src/leveldb/db/autocompact_test.cc
@@ -2,24 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
#include "leveldb/db.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
-class AutoCompactTest {
+class AutoCompactTest : public testing::Test {
public:
AutoCompactTest() {
- dbname_ = test::TmpDir() + "/autocompact_test";
+ dbname_ = testing::TempDir() + "autocompact_test";
tiny_cache_ = NewLRUCache(100);
options_.block_cache = tiny_cache_;
DestroyDB(dbname_, options_);
options_.create_if_missing = true;
options_.compression = kNoCompression;
- ASSERT_OK(DB::Open(options_, dbname_, &db_));
+ EXPECT_LEVELDB_OK(DB::Open(options_, dbname_, &db_));
}
~AutoCompactTest() {
@@ -30,7 +30,7 @@ class AutoCompactTest {
std::string Key(int i) {
char buf[100];
- snprintf(buf, sizeof(buf), "key%06d", i);
+ std::snprintf(buf, sizeof(buf), "key%06d", i);
return std::string(buf);
}
@@ -62,15 +62,15 @@ void AutoCompactTest::DoReads(int n) {
// Fill database
for (int i = 0; i < kCount; i++) {
- ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), Key(i), value));
}
- ASSERT_OK(dbi->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
// Delete everything
for (int i = 0; i < kCount; i++) {
- ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), Key(i)));
}
- ASSERT_OK(dbi->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
// Get initial measurement of the space we will be reading.
const int64_t initial_size = Size(Key(0), Key(n));
@@ -89,8 +89,8 @@ void AutoCompactTest::DoReads(int n) {
// Wait a little bit to allow any triggered compactions to complete.
Env::Default()->SleepForMicroseconds(1000000);
uint64_t size = Size(Key(0), Key(n));
- fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
- size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+ std::fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+ size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
if (size <= initial_size / 10) {
break;
}
@@ -103,10 +103,8 @@ void AutoCompactTest::DoReads(int n) {
ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
}
-TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
+TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); }
-TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
+TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
} // namespace leveldb
-
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/builder.cc b/src/leveldb/db/builder.cc
index 9520ee4535f4..e6329e05e457 100644
--- a/src/leveldb/db/builder.cc
+++ b/src/leveldb/db/builder.cc
@@ -30,11 +30,14 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
TableBuilder* builder = new TableBuilder(options, file);
meta->smallest.DecodeFrom(iter->key());
+ Slice key;
for (; iter->Valid(); iter->Next()) {
- Slice key = iter->key();
- meta->largest.DecodeFrom(key);
+ key = iter->key();
builder->Add(key, iter->value());
}
+ if (!key.empty()) {
+ meta->largest.DecodeFrom(key);
+ }
// Finish and check for builder errors
s = builder->Finish();
@@ -71,7 +74,7 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
if (s.ok() && meta->file_size > 0) {
// Keep it
} else {
- env->DeleteFile(fname);
+ env->RemoveFile(fname);
}
return s;
}
diff --git a/src/leveldb/db/c.cc b/src/leveldb/db/c.cc
index 3a492f9ac558..8bdde383445c 100644
--- a/src/leveldb/db/c.cc
+++ b/src/leveldb/db/c.cc
@@ -4,6 +4,8 @@
#include "leveldb/c.h"
+#include
+
#include
#include
@@ -119,7 +121,7 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
size_t len;
char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
dst->append(filter, len);
- free(filter);
+ std::free(filter);
}
bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
@@ -150,15 +152,16 @@ static bool SaveError(char** errptr, const Status& s) {
*errptr = strdup(s.ToString().c_str());
} else {
// TODO(sanjay): Merge with existing error?
- free(*errptr);
+ std::free(*errptr);
*errptr = strdup(s.ToString().c_str());
}
return true;
}
static char* CopyString(const std::string& str) {
- char* result = reinterpret_cast(malloc(sizeof(char) * str.size()));
- memcpy(result, str.data(), sizeof(char) * str.size());
+ char* result =
+ reinterpret_cast(std::malloc(sizeof(char) * str.size()));
+ std::memcpy(result, str.data(), sizeof(char) * str.size());
return result;
}
@@ -547,13 +550,13 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
return nullptr;
}
- char* buffer = static_cast(malloc(result.size() + 1));
- memcpy(buffer, result.data(), result.size());
+ char* buffer = static_cast(std::malloc(result.size() + 1));
+ std::memcpy(buffer, result.data(), result.size());
buffer[result.size()] = '\0';
return buffer;
}
-void leveldb_free(void* ptr) { free(ptr); }
+void leveldb_free(void* ptr) { std::free(ptr); }
int leveldb_major_version() { return kMajorVersion; }
diff --git a/src/leveldb/db/corruption_test.cc b/src/leveldb/db/corruption_test.cc
index 42f5237c659e..dc7da763f060 100644
--- a/src/leveldb/db/corruption_test.cc
+++ b/src/leveldb/db/corruption_test.cc
@@ -4,6 +4,7 @@
#include
+#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
@@ -13,14 +14,13 @@
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
#include "util/logging.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
static const int kValueSize = 1000;
-class CorruptionTest {
+class CorruptionTest : public testing::Test {
public:
CorruptionTest()
: db_(nullptr),
@@ -46,19 +46,19 @@ class CorruptionTest {
return DB::Open(options_, dbname_, &db_);
}
- void Reopen() { ASSERT_OK(TryReopen()); }
+ void Reopen() { ASSERT_LEVELDB_OK(TryReopen()); }
void RepairDB() {
delete db_;
db_ = nullptr;
- ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
+ ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_));
}
void Build(int n) {
std::string key_space, value_space;
WriteBatch batch;
for (int i = 0; i < n; i++) {
- // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+ // if ((i % 100) == 0) std::fprintf(stderr, "@ %d of %d\n", i, n);
Slice key = Key(i, &key_space);
batch.Clear();
batch.Put(key, Value(i, &value_space));
@@ -68,7 +68,7 @@ class CorruptionTest {
if (i == n - 1) {
options.sync = true;
}
- ASSERT_OK(db_->Write(options, &batch));
+ ASSERT_LEVELDB_OK(db_->Write(options, &batch));
}
}
@@ -102,9 +102,10 @@ class CorruptionTest {
}
delete iter;
- fprintf(stderr,
- "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
- min_expected, max_expected, correct, bad_keys, bad_values, missed);
+ std::fprintf(
+ stderr,
+ "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
+ min_expected, max_expected, correct, bad_keys, bad_values, missed);
ASSERT_LE(min_expected, correct);
ASSERT_GE(max_expected, correct);
}
@@ -112,7 +113,7 @@ class CorruptionTest {
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
// Pick file to corrupt
std::vector filenames;
- ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
+ ASSERT_LEVELDB_OK(env_.target()->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
std::string fname;
@@ -127,7 +128,7 @@ class CorruptionTest {
ASSERT_TRUE(!fname.empty()) << filetype;
uint64_t file_size;
- ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
+ ASSERT_LEVELDB_OK(env_.target()->GetFileSize(fname, &file_size));
if (offset < 0) {
// Relative to end of file; make it absolute
@@ -169,7 +170,7 @@ class CorruptionTest {
// Return the ith key
Slice Key(int i, std::string* storage) {
char buf[100];
- snprintf(buf, sizeof(buf), "%016d", i);
+ std::snprintf(buf, sizeof(buf), "%016d", i);
storage->assign(buf, strlen(buf));
return Slice(*storage);
}
@@ -189,7 +190,7 @@ class CorruptionTest {
Cache* tiny_cache_;
};
-TEST(CorruptionTest, Recovery) {
+TEST_F(CorruptionTest, Recovery) {
Build(100);
Check(100, 100);
Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
@@ -200,13 +201,13 @@ TEST(CorruptionTest, Recovery) {
Check(36, 36);
}
-TEST(CorruptionTest, RecoverWriteError) {
+TEST_F(CorruptionTest, RecoverWriteError) {
env_.writable_file_error_ = true;
Status s = TryReopen();
ASSERT_TRUE(!s.ok());
}
-TEST(CorruptionTest, NewFileErrorDuringWrite) {
+TEST_F(CorruptionTest, NewFileErrorDuringWrite) {
// Do enough writing to force minor compaction
env_.writable_file_error_ = true;
const int num = 3 + (Options().write_buffer_size / kValueSize);
@@ -223,7 +224,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) {
Reopen();
}
-TEST(CorruptionTest, TableFile) {
+TEST_F(CorruptionTest, TableFile) {
Build(100);
DBImpl* dbi = reinterpret_cast(db_);
dbi->TEST_CompactMemTable();
@@ -234,7 +235,7 @@ TEST(CorruptionTest, TableFile) {
Check(90, 99);
}
-TEST(CorruptionTest, TableFileRepair) {
+TEST_F(CorruptionTest, TableFileRepair) {
options_.block_size = 2 * kValueSize; // Limit scope of corruption
options_.paranoid_checks = true;
Reopen();
@@ -250,7 +251,7 @@ TEST(CorruptionTest, TableFileRepair) {
Check(95, 99);
}
-TEST(CorruptionTest, TableFileIndexData) {
+TEST_F(CorruptionTest, TableFileIndexData) {
Build(10000); // Enough to build multiple Tables
DBImpl* dbi = reinterpret_cast(db_);
dbi->TEST_CompactMemTable();
@@ -260,36 +261,36 @@ TEST(CorruptionTest, TableFileIndexData) {
Check(5000, 9999);
}
-TEST(CorruptionTest, MissingDescriptor) {
+TEST_F(CorruptionTest, MissingDescriptor) {
Build(1000);
RepairDB();
Reopen();
Check(1000, 1000);
}
-TEST(CorruptionTest, SequenceNumberRecovery) {
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5"));
+TEST_F(CorruptionTest, SequenceNumberRecovery) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v3"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v4"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v5"));
RepairDB();
Reopen();
std::string v;
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v5", v);
// Write something. If sequence number was not recovered properly,
// it will be hidden by an earlier write.
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6"));
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v6"));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v6", v);
Reopen();
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v6", v);
}
-TEST(CorruptionTest, CorruptedDescriptor) {
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
+TEST_F(CorruptionTest, CorruptedDescriptor) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast(db_);
dbi->TEST_CompactMemTable();
dbi->TEST_CompactRange(0, nullptr, nullptr);
@@ -301,11 +302,11 @@ TEST(CorruptionTest, CorruptedDescriptor) {
RepairDB();
Reopen();
std::string v;
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("hello", v);
}
-TEST(CorruptionTest, CompactionInputError) {
+TEST_F(CorruptionTest, CompactionInputError) {
Build(10);
DBImpl* dbi = reinterpret_cast(db_);
dbi->TEST_CompactMemTable();
@@ -320,7 +321,7 @@ TEST(CorruptionTest, CompactionInputError) {
Check(10000, 10000);
}
-TEST(CorruptionTest, CompactionInputErrorParanoid) {
+TEST_F(CorruptionTest, CompactionInputErrorParanoid) {
options_.paranoid_checks = true;
options_.write_buffer_size = 512 << 10;
Reopen();
@@ -341,22 +342,21 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
}
-TEST(CorruptionTest, UnrelatedKeys) {
+TEST_F(CorruptionTest, UnrelatedKeys) {
Build(10);
DBImpl* dbi = reinterpret_cast(db_);
dbi->TEST_CompactMemTable();
Corrupt(kTableFile, 100, 1);
std::string tmp1, tmp2;
- ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
+ ASSERT_LEVELDB_OK(
+ db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
std::string v;
- ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
dbi->TEST_CompactMemTable();
- ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
}
} // namespace leveldb
-
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc
index 65e31724bcec..f96d245583c8 100644
--- a/src/leveldb/db/db_impl.cc
+++ b/src/leveldb/db/db_impl.cc
@@ -4,11 +4,10 @@
#include "db/db_impl.h"
-#include
-#include
-
#include
#include
+#include
+#include
#include
#include
#include
@@ -197,6 +196,9 @@ Status DBImpl::NewDB() {
std::string record;
new_db.EncodeTo(&record);
s = log.AddRecord(record);
+ if (s.ok()) {
+ s = file->Sync();
+ }
if (s.ok()) {
s = file->Close();
}
@@ -206,7 +208,7 @@ Status DBImpl::NewDB() {
// Make "CURRENT" file that points to the new manifest file.
s = SetCurrentFile(env_, dbname_, 1);
} else {
- env_->DeleteFile(manifest);
+ env_->RemoveFile(manifest);
}
return s;
}
@@ -220,7 +222,7 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
}
}
-void DBImpl::DeleteObsoleteFiles() {
+void DBImpl::RemoveObsoleteFiles() {
mutex_.AssertHeld();
if (!bg_error_.ok()) {
@@ -282,7 +284,7 @@ void DBImpl::DeleteObsoleteFiles() {
// are therefore safe to delete while allowing other threads to proceed.
mutex_.Unlock();
for (const std::string& filename : files_to_delete) {
- env_->DeleteFile(dbname_ + "/" + filename);
+ env_->RemoveFile(dbname_ + "/" + filename);
}
mutex_.Lock();
}
@@ -302,6 +304,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
if (!env_->FileExists(CurrentFileName(dbname_))) {
if (options_.create_if_missing) {
+ Log(options_.info_log, "Creating DB %s since it was missing.",
+ dbname_.c_str());
s = NewDB();
if (!s.ok()) {
return s;
@@ -351,8 +355,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
}
if (!expected.empty()) {
char buf[50];
- snprintf(buf, sizeof(buf), "%d missing files; e.g.",
- static_cast(expected.size()));
+ std::snprintf(buf, sizeof(buf), "%d missing files; e.g.",
+ static_cast(expected.size()));
return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
}
@@ -428,7 +432,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
while (reader.ReadRecord(&record, &scratch) && status.ok()) {
if (record.size() < 12) {
reporter.Corruption(record.size(),
- Status::Corruption("log record too small", fname));
+ Status::Corruption("log record too small"));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
@@ -569,7 +573,7 @@ void DBImpl::CompactMemTable() {
imm_->Unref();
imm_ = nullptr;
has_imm_.store(false, std::memory_order_release);
- DeleteObsoleteFiles();
+ RemoveObsoleteFiles();
} else {
RecordBackgroundError(s);
}
@@ -625,6 +629,11 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,
background_work_finished_signal_.Wait();
}
}
+ // Finish current background compaction in the case where
+ // `background_work_finished_signal_` was signalled due to an error.
+ while (background_compaction_scheduled_) {
+ background_work_finished_signal_.Wait();
+ }
if (manual_compaction_ == &manual) {
// Cancel my manual compaction since we aborted early for some reason.
manual_compaction_ = nullptr;
@@ -729,7 +738,7 @@ void DBImpl::BackgroundCompaction() {
// Move file to next level
assert(c->num_input_files(0) == 1);
FileMetaData* f = c->input(0, 0);
- c->edit()->DeleteFile(c->level(), f->number);
+ c->edit()->RemoveFile(c->level(), f->number);
c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
f->largest);
status = versions_->LogAndApply(c->edit(), &mutex_);
@@ -749,7 +758,7 @@ void DBImpl::BackgroundCompaction() {
}
CleanupCompaction(compact);
c->ReleaseInputs();
- DeleteObsoleteFiles();
+ RemoveObsoleteFiles();
}
delete c;
@@ -1364,8 +1373,22 @@ Status DBImpl::MakeRoomForWrite(bool force) {
versions_->ReuseFileNumber(new_log_number);
break;
}
+
delete log_;
+
+ s = logfile_->Close();
+ if (!s.ok()) {
+ // We may have lost some data written to the previous log file.
+ // Switch to the new log file anyway, but record as a background
+ // error so we do not attempt any more writes.
+ //
+ // We could perhaps attempt to save the memtable corresponding
+ // to log file and suppress the error if that works, but that
+ // would add more complexity in a critical code path.
+ RecordBackgroundError(s);
+ }
delete logfile_;
+
logfile_ = lfile;
logfile_number_ = new_log_number;
log_ = new log::Writer(lfile);
@@ -1397,26 +1420,26 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
return false;
} else {
char buf[100];
- snprintf(buf, sizeof(buf), "%d",
- versions_->NumLevelFiles(static_cast(level)));
+ std::snprintf(buf, sizeof(buf), "%d",
+ versions_->NumLevelFiles(static_cast(level)));
*value = buf;
return true;
}
} else if (in == "stats") {
char buf[200];
- snprintf(buf, sizeof(buf),
- " Compactions\n"
- "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
- "--------------------------------------------------\n");
+ std::snprintf(buf, sizeof(buf),
+ " Compactions\n"
+ "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
+ "--------------------------------------------------\n");
value->append(buf);
for (int level = 0; level < config::kNumLevels; level++) {
int files = versions_->NumLevelFiles(level);
if (stats_[level].micros > 0 || files > 0) {
- snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
- files, versions_->NumLevelBytes(level) / 1048576.0,
- stats_[level].micros / 1e6,
- stats_[level].bytes_read / 1048576.0,
- stats_[level].bytes_written / 1048576.0);
+ std::snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
+ level, files, versions_->NumLevelBytes(level) / 1048576.0,
+ stats_[level].micros / 1e6,
+ stats_[level].bytes_read / 1048576.0,
+ stats_[level].bytes_written / 1048576.0);
value->append(buf);
}
}
@@ -1433,8 +1456,8 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
total_usage += imm_->ApproximateMemoryUsage();
}
char buf[50];
- snprintf(buf, sizeof(buf), "%llu",
- static_cast(total_usage));
+ std::snprintf(buf, sizeof(buf), "%llu",
+ static_cast(total_usage));
value->append(buf);
return true;
}
@@ -1506,7 +1529,7 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
}
if (s.ok()) {
- impl->DeleteObsoleteFiles();
+ impl->RemoveObsoleteFiles();
impl->MaybeScheduleCompaction();
}
impl->mutex_.Unlock();
@@ -1539,15 +1562,15 @@ Status DestroyDB(const std::string& dbname, const Options& options) {
for (size_t i = 0; i < filenames.size(); i++) {
if (ParseFileName(filenames[i], &number, &type) &&
type != kDBLockFile) { // Lock file will be deleted at end
- Status del = env->DeleteFile(dbname + "/" + filenames[i]);
+ Status del = env->RemoveFile(dbname + "/" + filenames[i]);
if (result.ok() && !del.ok()) {
result = del;
}
}
}
env->UnlockFile(lock); // Ignore error since state is already gone
- env->DeleteFile(lockname);
- env->DeleteDir(dbname); // Ignore error in case dir contains other files
+ env->RemoveFile(lockname);
+ env->RemoveDir(dbname); // Ignore error in case dir contains other files
}
return result;
}
diff --git a/src/leveldb/db/db_impl.h b/src/leveldb/db/db_impl.h
index 685735c733f9..c7b01721b85c 100644
--- a/src/leveldb/db/db_impl.h
+++ b/src/leveldb/db/db_impl.h
@@ -116,7 +116,7 @@ class DBImpl : public DB {
void MaybeIgnoreError(Status* s) const;
// Delete any unneeded files and stale in-memory entries.
- void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Compact the in-memory write buffer to disk. Switches to a new
// log-file/memtable and writes a new descriptor iff successful.
diff --git a/src/leveldb/db/db_iter.cc b/src/leveldb/db/db_iter.cc
index 98715a950235..532c2db81b2e 100644
--- a/src/leveldb/db/db_iter.cc
+++ b/src/leveldb/db/db_iter.cc
@@ -21,9 +21,9 @@ static void DumpInternalIter(Iterator* iter) {
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey k;
if (!ParseInternalKey(iter->key(), &k)) {
- fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
+ std::fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
} else {
- fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
+ std::fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
}
}
}
diff --git a/src/leveldb/db/db_iter.h b/src/leveldb/db/db_iter.h
index fd93e912a0de..5977fc893a1c 100644
--- a/src/leveldb/db/db_iter.h
+++ b/src/leveldb/db/db_iter.h
@@ -5,7 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_DB_ITER_H_
#define STORAGE_LEVELDB_DB_DB_ITER_H_
-#include
+#include
#include "db/dbformat.h"
#include "leveldb/db.h"
diff --git a/src/leveldb/db/db_test.cc b/src/leveldb/db/db_test.cc
index beb1d3bdef61..a4a84cd64665 100644
--- a/src/leveldb/db/db_test.cc
+++ b/src/leveldb/db/db_test.cc
@@ -5,8 +5,10 @@
#include "leveldb/db.h"
#include
+#include
#include
+#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
@@ -20,7 +22,6 @@
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
@@ -64,6 +65,19 @@ class AtomicCounter {
void DelayMilliseconds(int millis) {
Env::Default()->SleepForMicroseconds(millis * 1000);
}
+
+bool IsLdbFile(const std::string& f) {
+ return strstr(f.c_str(), ".ldb") != nullptr;
+}
+
+bool IsLogFile(const std::string& f) {
+ return strstr(f.c_str(), ".log") != nullptr;
+}
+
+bool IsManifestFile(const std::string& f) {
+ return strstr(f.c_str(), "MANIFEST") != nullptr;
+}
+
} // namespace
// Test Env to override default Env behavior for testing.
@@ -99,6 +113,10 @@ class TestEnv : public EnvWrapper {
// Special Env used to delay background operations.
class SpecialEnv : public EnvWrapper {
public:
+ // For historical reasons, the std::atomic<> fields below are currently
+ // accessed via acquired loads and release stores. We should switch
+ // to plain load(), store() calls that provide sequential consistency.
+
// sstable/log Sync() calls are blocked while this pointer is non-null.
std::atomic delay_data_sync_;
@@ -117,6 +135,9 @@ class SpecialEnv : public EnvWrapper {
// Force write to manifest files to fail while this pointer is non-null.
std::atomic manifest_write_error_;
+ // Force log file close to fail while this bool is true.
+ std::atomic log_file_close_;
+
bool count_random_reads_;
AtomicCounter random_read_counter_;
@@ -128,6 +149,7 @@ class SpecialEnv : public EnvWrapper {
non_writable_(false),
manifest_sync_error_(false),
manifest_write_error_(false),
+ log_file_close_(false),
count_random_reads_(false) {}
Status NewWritableFile(const std::string& f, WritableFile** r) {
@@ -135,9 +157,12 @@ class SpecialEnv : public EnvWrapper {
private:
SpecialEnv* const env_;
WritableFile* const base_;
+ const std::string fname_;
public:
- DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
+ DataFile(SpecialEnv* env, WritableFile* base, const std::string& fname)
+ : env_(env), base_(base), fname_(fname) {}
+
~DataFile() { delete base_; }
Status Append(const Slice& data) {
if (env_->no_space_.load(std::memory_order_acquire)) {
@@ -147,7 +172,14 @@ class SpecialEnv : public EnvWrapper {
return base_->Append(data);
}
}
- Status Close() { return base_->Close(); }
+ Status Close() {
+ Status s = base_->Close();
+ if (s.ok() && IsLogFile(fname_) &&
+ env_->log_file_close_.load(std::memory_order_acquire)) {
+ s = Status::IOError("simulated log file Close error");
+ }
+ return s;
+ }
Status Flush() { return base_->Flush(); }
Status Sync() {
if (env_->data_sync_error_.load(std::memory_order_acquire)) {
@@ -158,7 +190,6 @@ class SpecialEnv : public EnvWrapper {
}
return base_->Sync();
}
- std::string GetName() const override { return ""; }
};
class ManifestFile : public WritableFile {
private:
@@ -184,7 +215,6 @@ class SpecialEnv : public EnvWrapper {
return base_->Sync();
}
}
- std::string GetName() const override { return ""; }
};
if (non_writable_.load(std::memory_order_acquire)) {
@@ -193,10 +223,9 @@ class SpecialEnv : public EnvWrapper {
Status s = target()->NewWritableFile(f, r);
if (s.ok()) {
- if (strstr(f.c_str(), ".ldb") != nullptr ||
- strstr(f.c_str(), ".log") != nullptr) {
- *r = new DataFile(this, *r);
- } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
+ if (IsLdbFile(f) || IsLogFile(f)) {
+ *r = new DataFile(this, *r, f);
+ } else if (IsManifestFile(f)) {
*r = new ManifestFile(this, *r);
}
}
@@ -218,7 +247,6 @@ class SpecialEnv : public EnvWrapper {
counter_->Increment();
return target_->Read(offset, n, result, scratch);
}
- std::string GetName() const override { return ""; }
};
Status s = target()->NewRandomAccessFile(f, r);
@@ -229,7 +257,7 @@ class SpecialEnv : public EnvWrapper {
}
};
-class DBTest {
+class DBTest : public testing::Test {
public:
std::string dbname_;
SpecialEnv* env_;
@@ -239,7 +267,7 @@ class DBTest {
DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
- dbname_ = test::TmpDir() + "/db_test";
+ dbname_ = testing::TempDir() + "db_test";
DestroyDB(dbname_, Options());
db_ = nullptr;
Reopen();
@@ -286,7 +314,9 @@ class DBTest {
DBImpl* dbfull() { return reinterpret_cast(db_); }
- void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
+ void Reopen(Options* options = nullptr) {
+ ASSERT_LEVELDB_OK(TryReopen(options));
+ }
void Close() {
delete db_;
@@ -297,7 +327,7 @@ class DBTest {
delete db_;
db_ = nullptr;
DestroyDB(dbname_, Options());
- ASSERT_OK(TryReopen(options));
+ ASSERT_LEVELDB_OK(TryReopen(options));
}
Status TryReopen(Options* options) {
@@ -351,11 +381,11 @@ class DBTest {
// Check reverse iteration results are the reverse of forward results
size_t matched = 0;
for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
- ASSERT_LT(matched, forward.size());
- ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
+ EXPECT_LT(matched, forward.size());
+ EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
matched++;
}
- ASSERT_EQ(matched, forward.size());
+ EXPECT_EQ(matched, forward.size());
delete iter;
return result;
@@ -405,7 +435,7 @@ class DBTest {
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(db_->GetProperty(
+ EXPECT_TRUE(db_->GetProperty(
"leveldb.num-files-at-level" + NumberToString(level), &property));
return std::stoi(property);
}
@@ -425,7 +455,7 @@ class DBTest {
for (int level = 0; level < config::kNumLevels; level++) {
int f = NumTableFilesAtLevel(level);
char buf[100];
- snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
+ std::snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
result += buf;
if (f > 0) {
last_non_zero_offset = result.size();
@@ -470,14 +500,14 @@ class DBTest {
}
void DumpFileCounts(const char* label) {
- fprintf(stderr, "---\n%s:\n", label);
- fprintf(
+ std::fprintf(stderr, "---\n%s:\n", label);
+ std::fprintf(
stderr, "maxoverlap: %lld\n",
static_cast(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
for (int level = 0; level < config::kNumLevels; level++) {
int num = NumTableFilesAtLevel(level);
if (num > 0) {
- fprintf(stderr, " level %3d : %d files\n", level, num);
+ std::fprintf(stderr, " level %3d : %d files\n", level, num);
}
}
}
@@ -500,12 +530,12 @@ class DBTest {
bool DeleteAnSSTFile() {
std::vector filenames;
- ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
for (size_t i = 0; i < filenames.size(); i++) {
if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
- ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number)));
+ EXPECT_LEVELDB_OK(env_->RemoveFile(TableFileName(dbname_, number)));
return true;
}
}
@@ -515,7 +545,7 @@ class DBTest {
// Returns number of files renamed.
int RenameLDBToSST() {
std::vector filenames;
- ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
int files_renamed = 0;
@@ -523,7 +553,7 @@ class DBTest {
if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
const std::string from = TableFileName(dbname_, number);
const std::string to = SSTTableFileName(dbname_, number);
- ASSERT_OK(env_->RenameFile(from, to));
+ EXPECT_LEVELDB_OK(env_->RenameFile(from, to));
files_renamed++;
}
}
@@ -538,63 +568,63 @@ class DBTest {
int option_config_;
};
-TEST(DBTest, Empty) {
+TEST_F(DBTest, Empty) {
do {
ASSERT_TRUE(db_ != nullptr);
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, EmptyKey) {
+TEST_F(DBTest, EmptyKey) {
do {
- ASSERT_OK(Put("", "v1"));
+ ASSERT_LEVELDB_OK(Put("", "v1"));
ASSERT_EQ("v1", Get(""));
- ASSERT_OK(Put("", "v2"));
+ ASSERT_LEVELDB_OK(Put("", "v2"));
ASSERT_EQ("v2", Get(""));
} while (ChangeOptions());
}
-TEST(DBTest, EmptyValue) {
+TEST_F(DBTest, EmptyValue) {
do {
- ASSERT_OK(Put("key", "v1"));
+ ASSERT_LEVELDB_OK(Put("key", "v1"));
ASSERT_EQ("v1", Get("key"));
- ASSERT_OK(Put("key", ""));
+ ASSERT_LEVELDB_OK(Put("key", ""));
ASSERT_EQ("", Get("key"));
- ASSERT_OK(Put("key", "v2"));
+ ASSERT_LEVELDB_OK(Put("key", "v2"));
ASSERT_EQ("v2", Get("key"));
} while (ChangeOptions());
}
-TEST(DBTest, ReadWrite) {
+TEST_F(DBTest, ReadWrite) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- ASSERT_OK(Put("bar", "v2"));
- ASSERT_OK(Put("foo", "v3"));
+ ASSERT_LEVELDB_OK(Put("bar", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v3"));
ASSERT_EQ("v3", Get("foo"));
ASSERT_EQ("v2", Get("bar"));
} while (ChangeOptions());
}
-TEST(DBTest, PutDeleteGet) {
+TEST_F(DBTest, PutDeleteGet) {
do {
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
ASSERT_EQ("v2", Get("foo"));
- ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo"));
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetFromImmutableLayer) {
+TEST_F(DBTest, GetFromImmutableLayer) {
do {
Options options = CurrentOptions();
options.env = env_;
options.write_buffer_size = 100000; // Small write buffer
Reopen(&options);
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
// Block sync calls.
@@ -607,17 +637,17 @@ TEST(DBTest, GetFromImmutableLayer) {
} while (ChangeOptions());
}
-TEST(DBTest, GetFromVersions) {
+TEST_F(DBTest, GetFromVersions) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v1", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetMemUsage) {
+TEST_F(DBTest, GetMemUsage) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
std::string val;
ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
int mem_usage = std::stoi(val);
@@ -626,14 +656,14 @@ TEST(DBTest, GetMemUsage) {
} while (ChangeOptions());
}
-TEST(DBTest, GetSnapshot) {
+TEST_F(DBTest, GetSnapshot) {
do {
// Try with both a short key and a long key
for (int i = 0; i < 2; i++) {
std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
- ASSERT_OK(Put(key, "v1"));
+ ASSERT_LEVELDB_OK(Put(key, "v1"));
const Snapshot* s1 = db_->GetSnapshot();
- ASSERT_OK(Put(key, "v2"));
+ ASSERT_LEVELDB_OK(Put(key, "v2"));
ASSERT_EQ("v2", Get(key));
ASSERT_EQ("v1", Get(key, s1));
dbfull()->TEST_CompactMemTable();
@@ -644,16 +674,16 @@ TEST(DBTest, GetSnapshot) {
} while (ChangeOptions());
}
-TEST(DBTest, GetIdenticalSnapshots) {
+TEST_F(DBTest, GetIdenticalSnapshots) {
do {
// Try with both a short key and a long key
for (int i = 0; i < 2; i++) {
std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
- ASSERT_OK(Put(key, "v1"));
+ ASSERT_LEVELDB_OK(Put(key, "v1"));
const Snapshot* s1 = db_->GetSnapshot();
const Snapshot* s2 = db_->GetSnapshot();
const Snapshot* s3 = db_->GetSnapshot();
- ASSERT_OK(Put(key, "v2"));
+ ASSERT_LEVELDB_OK(Put(key, "v2"));
ASSERT_EQ("v2", Get(key));
ASSERT_EQ("v1", Get(key, s1));
ASSERT_EQ("v1", Get(key, s2));
@@ -669,13 +699,13 @@ TEST(DBTest, GetIdenticalSnapshots) {
} while (ChangeOptions());
}
-TEST(DBTest, IterateOverEmptySnapshot) {
+TEST_F(DBTest, IterateOverEmptySnapshot) {
do {
const Snapshot* snapshot = db_->GetSnapshot();
ReadOptions read_options;
read_options.snapshot = snapshot;
- ASSERT_OK(Put("foo", "v1"));
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Iterator* iterator1 = db_->NewIterator(read_options);
iterator1->SeekToFirst();
@@ -693,41 +723,41 @@ TEST(DBTest, IterateOverEmptySnapshot) {
} while (ChangeOptions());
}
-TEST(DBTest, GetLevel0Ordering) {
+TEST_F(DBTest, GetLevel0Ordering) {
do {
// Check that we process level-0 files in correct order. The code
// below generates two level-0 files where the earlier one comes
// before the later one in the level-0 file list since the earlier
// one has a smaller "smallest" key.
- ASSERT_OK(Put("bar", "b"));
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("bar", "b"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
dbfull()->TEST_CompactMemTable();
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v2", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetOrderedByLevels) {
+TEST_F(DBTest, GetOrderedByLevels) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
Compact("a", "z");
ASSERT_EQ("v1", Get("foo"));
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
ASSERT_EQ("v2", Get("foo"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v2", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetPicksCorrectFile) {
+TEST_F(DBTest, GetPicksCorrectFile) {
do {
// Arrange to have multiple files in a non-level-0 level.
- ASSERT_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("a", "va"));
Compact("a", "b");
- ASSERT_OK(Put("x", "vx"));
+ ASSERT_LEVELDB_OK(Put("x", "vx"));
Compact("x", "y");
- ASSERT_OK(Put("f", "vf"));
+ ASSERT_LEVELDB_OK(Put("f", "vf"));
Compact("f", "g");
ASSERT_EQ("va", Get("a"));
ASSERT_EQ("vf", Get("f"));
@@ -735,7 +765,7 @@ TEST(DBTest, GetPicksCorrectFile) {
} while (ChangeOptions());
}
-TEST(DBTest, GetEncountersEmptyLevel) {
+TEST_F(DBTest, GetEncountersEmptyLevel) {
do {
// Arrange for the following to happen:
// * sstable A in level 0
@@ -773,7 +803,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
} while (ChangeOptions());
}
-TEST(DBTest, IterEmpty) {
+TEST_F(DBTest, IterEmpty) {
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
@@ -788,8 +818,8 @@ TEST(DBTest, IterEmpty) {
delete iter;
}
-TEST(DBTest, IterSingle) {
- ASSERT_OK(Put("a", "va"));
+TEST_F(DBTest, IterSingle) {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
@@ -826,10 +856,10 @@ TEST(DBTest, IterSingle) {
delete iter;
}
-TEST(DBTest, IterMulti) {
- ASSERT_OK(Put("a", "va"));
- ASSERT_OK(Put("b", "vb"));
- ASSERT_OK(Put("c", "vc"));
+TEST_F(DBTest, IterMulti) {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("b", "vb"));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
@@ -884,11 +914,11 @@ TEST(DBTest, IterMulti) {
ASSERT_EQ(IterStatus(iter), "b->vb");
// Make sure iter stays at snapshot
- ASSERT_OK(Put("a", "va2"));
- ASSERT_OK(Put("a2", "va3"));
- ASSERT_OK(Put("b", "vb2"));
- ASSERT_OK(Put("c", "vc2"));
- ASSERT_OK(Delete("b"));
+ ASSERT_LEVELDB_OK(Put("a", "va2"));
+ ASSERT_LEVELDB_OK(Put("a2", "va3"));
+ ASSERT_LEVELDB_OK(Put("b", "vb2"));
+ ASSERT_LEVELDB_OK(Put("c", "vc2"));
+ ASSERT_LEVELDB_OK(Delete("b"));
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->va");
iter->Next();
@@ -909,12 +939,12 @@ TEST(DBTest, IterMulti) {
delete iter;
}
-TEST(DBTest, IterSmallAndLargeMix) {
- ASSERT_OK(Put("a", "va"));
- ASSERT_OK(Put("b", std::string(100000, 'b')));
- ASSERT_OK(Put("c", "vc"));
- ASSERT_OK(Put("d", std::string(100000, 'd')));
- ASSERT_OK(Put("e", std::string(100000, 'e')));
+TEST_F(DBTest, IterSmallAndLargeMix) {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b')));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
+ ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd')));
+ ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e')));
Iterator* iter = db_->NewIterator(ReadOptions());
@@ -947,12 +977,30 @@ TEST(DBTest, IterSmallAndLargeMix) {
delete iter;
}
-TEST(DBTest, IterMultiWithDelete) {
+TEST_F(DBTest, IterMultiWithDelete) {
+ do {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("b", "vb"));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
+ ASSERT_LEVELDB_OK(Delete("b"));
+ ASSERT_EQ("NOT_FOUND", Get("b"));
+
+ Iterator* iter = db_->NewIterator(ReadOptions());
+ iter->Seek("c");
+ ASSERT_EQ(IterStatus(iter), "c->vc");
+ iter->Prev();
+ ASSERT_EQ(IterStatus(iter), "a->va");
+ delete iter;
+ } while (ChangeOptions());
+}
+
+TEST_F(DBTest, IterMultiWithDeleteAndCompaction) {
do {
- ASSERT_OK(Put("a", "va"));
- ASSERT_OK(Put("b", "vb"));
- ASSERT_OK(Put("c", "vc"));
- ASSERT_OK(Delete("b"));
+ ASSERT_LEVELDB_OK(Put("b", "vb"));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ dbfull()->TEST_CompactMemTable();
+ ASSERT_LEVELDB_OK(Delete("b"));
ASSERT_EQ("NOT_FOUND", Get("b"));
Iterator* iter = db_->NewIterator(ReadOptions());
@@ -960,39 +1008,41 @@ TEST(DBTest, IterMultiWithDelete) {
ASSERT_EQ(IterStatus(iter), "c->vc");
iter->Prev();
ASSERT_EQ(IterStatus(iter), "a->va");
+ iter->Seek("b");
+ ASSERT_EQ(IterStatus(iter), "c->vc");
delete iter;
} while (ChangeOptions());
}
-TEST(DBTest, Recover) {
+TEST_F(DBTest, Recover) {
do {
- ASSERT_OK(Put("foo", "v1"));
- ASSERT_OK(Put("baz", "v5"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("baz", "v5"));
Reopen();
ASSERT_EQ("v1", Get("foo"));
ASSERT_EQ("v1", Get("foo"));
ASSERT_EQ("v5", Get("baz"));
- ASSERT_OK(Put("bar", "v2"));
- ASSERT_OK(Put("foo", "v3"));
+ ASSERT_LEVELDB_OK(Put("bar", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v3"));
Reopen();
ASSERT_EQ("v3", Get("foo"));
- ASSERT_OK(Put("foo", "v4"));
+ ASSERT_LEVELDB_OK(Put("foo", "v4"));
ASSERT_EQ("v4", Get("foo"));
ASSERT_EQ("v2", Get("bar"));
ASSERT_EQ("v5", Get("baz"));
} while (ChangeOptions());
}
-TEST(DBTest, RecoveryWithEmptyLog) {
+TEST_F(DBTest, RecoveryWithEmptyLog) {
do {
- ASSERT_OK(Put("foo", "v1"));
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Reopen();
Reopen();
- ASSERT_OK(Put("foo", "v3"));
+ ASSERT_LEVELDB_OK(Put("foo", "v3"));
Reopen();
ASSERT_EQ("v3", Get("foo"));
} while (ChangeOptions());
@@ -1000,7 +1050,7 @@ TEST(DBTest, RecoveryWithEmptyLog) {
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
-TEST(DBTest, RecoverDuringMemtableCompaction) {
+TEST_F(DBTest, RecoverDuringMemtableCompaction) {
do {
Options options = CurrentOptions();
options.env = env_;
@@ -1008,10 +1058,12 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
Reopen(&options);
// Trigger a long memtable compaction and reopen the database during it
- ASSERT_OK(Put("foo", "v1")); // Goes to 1st log file
- ASSERT_OK(Put("big1", std::string(10000000, 'x'))); // Fills memtable
- ASSERT_OK(Put("big2", std::string(1000, 'y'))); // Triggers compaction
- ASSERT_OK(Put("bar", "v2")); // Goes to new log file
+ ASSERT_LEVELDB_OK(Put("foo", "v1")); // Goes to 1st log file
+ ASSERT_LEVELDB_OK(
+ Put("big1", std::string(10000000, 'x'))); // Fills memtable
+ ASSERT_LEVELDB_OK(
+ Put("big2", std::string(1000, 'y'))); // Triggers compaction
+ ASSERT_LEVELDB_OK(Put("bar", "v2")); // Goes to new log file
Reopen(&options);
ASSERT_EQ("v1", Get("foo"));
@@ -1023,11 +1075,11 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
static std::string Key(int i) {
char buf[100];
- snprintf(buf, sizeof(buf), "key%06d", i);
+ std::snprintf(buf, sizeof(buf), "key%06d", i);
return std::string(buf);
}
-TEST(DBTest, MinorCompactionsHappen) {
+TEST_F(DBTest, MinorCompactionsHappen) {
Options options = CurrentOptions();
options.write_buffer_size = 10000;
Reopen(&options);
@@ -1036,7 +1088,7 @@ TEST(DBTest, MinorCompactionsHappen) {
int starting_num_tables = TotalTableFiles();
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
+ ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
}
int ending_num_tables = TotalTableFiles();
ASSERT_GT(ending_num_tables, starting_num_tables);
@@ -1052,14 +1104,14 @@ TEST(DBTest, MinorCompactionsHappen) {
}
}
-TEST(DBTest, RecoverWithLargeLog) {
+TEST_F(DBTest, RecoverWithLargeLog) {
{
Options options = CurrentOptions();
Reopen(&options);
- ASSERT_OK(Put("big1", std::string(200000, '1')));
- ASSERT_OK(Put("big2", std::string(200000, '2')));
- ASSERT_OK(Put("small3", std::string(10, '3')));
- ASSERT_OK(Put("small4", std::string(10, '4')));
+ ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1')));
+ ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2')));
+ ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3')));
+ ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4')));
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
}
@@ -1076,7 +1128,7 @@ TEST(DBTest, RecoverWithLargeLog) {
ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
-TEST(DBTest, CompactionsGenerateMultipleFiles) {
+TEST_F(DBTest, CompactionsGenerateMultipleFiles) {
Options options = CurrentOptions();
options.write_buffer_size = 100000000; // Large write buffer
Reopen(&options);
@@ -1088,7 +1140,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
std::vector values;
for (int i = 0; i < 80; i++) {
values.push_back(RandomString(&rnd, 100000));
- ASSERT_OK(Put(Key(i), values[i]));
+ ASSERT_LEVELDB_OK(Put(Key(i), values[i]));
}
// Reopening moves updates to level-0
@@ -1102,7 +1154,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
}
}
-TEST(DBTest, RepeatedWritesToSameKey) {
+TEST_F(DBTest, RepeatedWritesToSameKey) {
Options options = CurrentOptions();
options.env = env_;
options.write_buffer_size = 100000; // Small write buffer
@@ -1117,11 +1169,11 @@ TEST(DBTest, RepeatedWritesToSameKey) {
for (int i = 0; i < 5 * kMaxFiles; i++) {
Put("key", value);
ASSERT_LE(TotalTableFiles(), kMaxFiles);
- fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
+ std::fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
}
}
-TEST(DBTest, SparseMerge) {
+TEST_F(DBTest, SparseMerge) {
Options options = CurrentOptions();
options.compression = kNoCompression;
Reopen(&options);
@@ -1139,7 +1191,7 @@ TEST(DBTest, SparseMerge) {
// Write approximately 100MB of "B" values
for (int i = 0; i < 100000; i++) {
char key[100];
- snprintf(key, sizeof(key), "B%010d", i);
+ std::snprintf(key, sizeof(key), "B%010d", i);
Put(key, value);
}
Put("C", "vc");
@@ -1164,14 +1216,14 @@ TEST(DBTest, SparseMerge) {
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
- fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val), (unsigned long long)(low),
- (unsigned long long)(high));
+ std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
+ (unsigned long long)(val), (unsigned long long)(low),
+ (unsigned long long)(high));
}
return result;
}
-TEST(DBTest, ApproximateSizes) {
+TEST_F(DBTest, ApproximateSizes) {
do {
Options options = CurrentOptions();
options.write_buffer_size = 100000000; // Large write buffer
@@ -1189,7 +1241,7 @@ TEST(DBTest, ApproximateSizes) {
static const int S2 = 105000; // Allow some expansion from metadata
Random rnd(301);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
+ ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1)));
}
// 0 because GetApproximateSizes() does not account for memtable space
@@ -1230,7 +1282,7 @@ TEST(DBTest, ApproximateSizes) {
} while (ChangeOptions());
}
-TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
+TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
do {
Options options = CurrentOptions();
options.compression = kNoCompression;
@@ -1238,18 +1290,18 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
Random rnd(301);
std::string big1 = RandomString(&rnd, 100000);
- ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(2), big1));
- ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(4), big1));
- ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
- ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(2), big1));
+ ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(4), big1));
+ ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000)));
+ ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000)));
if (options.reuse_logs) {
// Need to force a memtable compaction since recovery does not do so.
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
}
// Check sizes across recovery by reopening a few times
@@ -1273,7 +1325,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
} while (ChangeOptions());
}
-TEST(DBTest, IteratorPinsRef) {
+TEST_F(DBTest, IteratorPinsRef) {
Put("foo", "hello");
// Get iterator that will yield the current contents of the DB.
@@ -1282,7 +1334,8 @@ TEST(DBTest, IteratorPinsRef) {
// Write to force compactions
Put("foo", "newvalue1");
for (int i = 0; i < 100; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
+ ASSERT_LEVELDB_OK(
+ Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
}
Put("foo", "newvalue2");
@@ -1295,7 +1348,7 @@ TEST(DBTest, IteratorPinsRef) {
delete iter;
}
-TEST(DBTest, Snapshot) {
+TEST_F(DBTest, Snapshot) {
do {
Put("foo", "v1");
const Snapshot* s1 = db_->GetSnapshot();
@@ -1324,7 +1377,7 @@ TEST(DBTest, Snapshot) {
} while (ChangeOptions());
}
-TEST(DBTest, HiddenValuesAreRemoved) {
+TEST_F(DBTest, HiddenValuesAreRemoved) {
do {
Random rnd(301);
FillLevels("a", "z");
@@ -1336,7 +1389,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
Put("foo", "tiny");
Put("pastfoo2", "v2"); // Advance sequence number one more
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
ASSERT_GT(NumTableFilesAtLevel(0), 0);
ASSERT_EQ(big, Get("foo", snapshot));
@@ -1355,9 +1408,9 @@ TEST(DBTest, HiddenValuesAreRemoved) {
} while (ChangeOptions());
}
-TEST(DBTest, DeletionMarkers1) {
+TEST_F(DBTest, DeletionMarkers1) {
Put("foo", "v1");
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
@@ -1371,7 +1424,7 @@ TEST(DBTest, DeletionMarkers1) {
Delete("foo");
Put("foo", "v2");
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
- ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z");
dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
@@ -1384,9 +1437,9 @@ TEST(DBTest, DeletionMarkers1) {
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
-TEST(DBTest, DeletionMarkers2) {
+TEST_F(DBTest, DeletionMarkers2) {
Put("foo", "v1");
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
@@ -1399,7 +1452,7 @@ TEST(DBTest, DeletionMarkers2) {
Delete("foo");
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
// DEL kept: "last" file overlaps
@@ -1410,17 +1463,17 @@ TEST(DBTest, DeletionMarkers2) {
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
-TEST(DBTest, OverlapInLevel0) {
+TEST_F(DBTest, OverlapInLevel0) {
do {
ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
// Fill levels 1 and 2 to disable the pushing of new memtables to levels >
// 0.
- ASSERT_OK(Put("100", "v100"));
- ASSERT_OK(Put("999", "v999"));
+ ASSERT_LEVELDB_OK(Put("100", "v100"));
+ ASSERT_LEVELDB_OK(Put("999", "v999"));
dbfull()->TEST_CompactMemTable();
- ASSERT_OK(Delete("100"));
- ASSERT_OK(Delete("999"));
+ ASSERT_LEVELDB_OK(Delete("100"));
+ ASSERT_LEVELDB_OK(Delete("999"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("0,1,1", FilesPerLevel());
@@ -1428,12 +1481,12 @@ TEST(DBTest, OverlapInLevel0) {
// files[0] 200 .. 900
// files[1] 300 .. 500
// Note that files are sorted by smallest key.
- ASSERT_OK(Put("300", "v300"));
- ASSERT_OK(Put("500", "v500"));
+ ASSERT_LEVELDB_OK(Put("300", "v300"));
+ ASSERT_LEVELDB_OK(Put("500", "v500"));
dbfull()->TEST_CompactMemTable();
- ASSERT_OK(Put("200", "v200"));
- ASSERT_OK(Put("600", "v600"));
- ASSERT_OK(Put("900", "v900"));
+ ASSERT_LEVELDB_OK(Put("200", "v200"));
+ ASSERT_LEVELDB_OK(Put("600", "v600"));
+ ASSERT_LEVELDB_OK(Put("900", "v900"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("2,1,1", FilesPerLevel());
@@ -1445,23 +1498,23 @@ TEST(DBTest, OverlapInLevel0) {
// Do a memtable compaction. Before bug-fix, the compaction would
// not detect the overlap with level-0 files and would incorrectly place
// the deletion in a deeper level.
- ASSERT_OK(Delete("600"));
+ ASSERT_LEVELDB_OK(Delete("600"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("3", FilesPerLevel());
ASSERT_EQ("NOT_FOUND", Get("600"));
} while (ChangeOptions());
}
-TEST(DBTest, L0_CompactionBug_Issue44_a) {
+TEST_F(DBTest, L0_CompactionBug_Issue44_a) {
Reopen();
- ASSERT_OK(Put("b", "v"));
+ ASSERT_LEVELDB_OK(Put("b", "v"));
Reopen();
- ASSERT_OK(Delete("b"));
- ASSERT_OK(Delete("a"));
+ ASSERT_LEVELDB_OK(Delete("b"));
+ ASSERT_LEVELDB_OK(Delete("a"));
Reopen();
- ASSERT_OK(Delete("a"));
+ ASSERT_LEVELDB_OK(Delete("a"));
Reopen();
- ASSERT_OK(Put("a", "v"));
+ ASSERT_LEVELDB_OK(Put("a", "v"));
Reopen();
Reopen();
ASSERT_EQ("(a->v)", Contents());
@@ -1469,7 +1522,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
ASSERT_EQ("(a->v)", Contents());
}
-TEST(DBTest, L0_CompactionBug_Issue44_b) {
+TEST_F(DBTest, L0_CompactionBug_Issue44_b) {
Reopen();
Put("", "");
Reopen();
@@ -1495,16 +1548,16 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
ASSERT_EQ("(->)(c->cv)", Contents());
}
-TEST(DBTest, Fflush_Issue474) {
+TEST_F(DBTest, Fflush_Issue474) {
static const int kNum = 100000;
Random rnd(test::RandomSeed());
for (int i = 0; i < kNum; i++) {
- fflush(nullptr);
- ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
+ std::fflush(nullptr);
+ ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
}
}
-TEST(DBTest, ComparatorCheck) {
+TEST_F(DBTest, ComparatorCheck) {
class NewComparator : public Comparator {
public:
const char* Name() const override { return "leveldb.NewComparator"; }
@@ -1527,7 +1580,7 @@ TEST(DBTest, ComparatorCheck) {
<< s.ToString();
}
-TEST(DBTest, CustomComparator) {
+TEST_F(DBTest, CustomComparator) {
class NumberComparator : public Comparator {
public:
const char* Name() const override { return "test.NumberComparator"; }
@@ -1545,11 +1598,11 @@ TEST(DBTest, CustomComparator) {
private:
static int ToNumber(const Slice& x) {
// Check that there are no extra characters.
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
+ EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
- ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
+ EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
<< EscapeString(x);
return val;
}
@@ -1561,8 +1614,8 @@ TEST(DBTest, CustomComparator) {
new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options);
- ASSERT_OK(Put("[10]", "ten"));
- ASSERT_OK(Put("[0x14]", "twenty"));
+ ASSERT_LEVELDB_OK(Put("[10]", "ten"));
+ ASSERT_LEVELDB_OK(Put("[0x14]", "twenty"));
for (int i = 0; i < 2; i++) {
ASSERT_EQ("ten", Get("[10]"));
ASSERT_EQ("ten", Get("[0xa]"));
@@ -1576,14 +1629,14 @@ TEST(DBTest, CustomComparator) {
for (int run = 0; run < 2; run++) {
for (int i = 0; i < 1000; i++) {
char buf[100];
- snprintf(buf, sizeof(buf), "[%d]", i * 10);
- ASSERT_OK(Put(buf, buf));
+ std::snprintf(buf, sizeof(buf), "[%d]", i * 10);
+ ASSERT_LEVELDB_OK(Put(buf, buf));
}
Compact("[0]", "[1000000]");
}
}
-TEST(DBTest, ManualCompaction) {
+TEST_F(DBTest, ManualCompaction) {
ASSERT_EQ(config::kMaxMemCompactLevel, 2)
<< "Need to update this test to match kMaxMemCompactLevel";
@@ -1617,8 +1670,8 @@ TEST(DBTest, ManualCompaction) {
ASSERT_EQ("0,0,1", FilesPerLevel());
}
-TEST(DBTest, DBOpen_Options) {
- std::string dbname = test::TmpDir() + "/db_options_test";
+TEST_F(DBTest, DBOpen_Options) {
+ std::string dbname = testing::TempDir() + "db_options_test";
DestroyDB(dbname, Options());
// Does not exist, and create_if_missing == false: error
@@ -1632,7 +1685,7 @@ TEST(DBTest, DBOpen_Options) {
// Does not exist, and create_if_missing == true: OK
opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db);
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
@@ -1649,50 +1702,56 @@ TEST(DBTest, DBOpen_Options) {
opts.create_if_missing = true;
opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db);
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
db = nullptr;
}
-TEST(DBTest, DestroyEmptyDir) {
- std::string dbname = test::TmpDir() + "/db_empty_dir";
+TEST_F(DBTest, DestroyEmptyDir) {
+ std::string dbname = testing::TempDir() + "db_empty_dir";
TestEnv env(Env::Default());
- env.DeleteDir(dbname);
+ env.RemoveDir(dbname);
ASSERT_TRUE(!env.FileExists(dbname));
Options opts;
opts.env = &env;
- ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_LEVELDB_OK(env.CreateDir(dbname));
ASSERT_TRUE(env.FileExists(dbname));
std::vector<std::string> children;
- ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
+#if defined(LEVELDB_PLATFORM_CHROMIUM)
+ // TODO(https://crbug.com/1428746): Chromium's file system abstraction always
+ // filters out '.' and '..'.
+ ASSERT_EQ(0, children.size());
+#else
// The stock Env's do not filter out '.' and '..' special files.
ASSERT_EQ(2, children.size());
- ASSERT_OK(DestroyDB(dbname, opts));
+#endif // defined(LEVELDB_PLATFORM_CHROMIUM)
+ ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
ASSERT_TRUE(!env.FileExists(dbname));
// Should also be destroyed if Env is filtering out dot files.
env.SetIgnoreDotFiles(true);
- ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_LEVELDB_OK(env.CreateDir(dbname));
ASSERT_TRUE(env.FileExists(dbname));
- ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
ASSERT_EQ(0, children.size());
- ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
ASSERT_TRUE(!env.FileExists(dbname));
}
-TEST(DBTest, DestroyOpenDB) {
- std::string dbname = test::TmpDir() + "/open_db_dir";
- env_->DeleteDir(dbname);
+TEST_F(DBTest, DestroyOpenDB) {
+ std::string dbname = testing::TempDir() + "open_db_dir";
+ env_->RemoveDir(dbname);
ASSERT_TRUE(!env_->FileExists(dbname));
Options opts;
opts.create_if_missing = true;
DB* db = nullptr;
- ASSERT_OK(DB::Open(opts, dbname, &db));
+ ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db));
ASSERT_TRUE(db != nullptr);
// Must fail to destroy an open db.
@@ -1704,23 +1763,23 @@ TEST(DBTest, DestroyOpenDB) {
db = nullptr;
// Should succeed destroying a closed db.
- ASSERT_OK(DestroyDB(dbname, Options()));
+ ASSERT_LEVELDB_OK(DestroyDB(dbname, Options()));
ASSERT_TRUE(!env_->FileExists(dbname));
}
-TEST(DBTest, Locking) {
+TEST_F(DBTest, Locking) {
DB* db2 = nullptr;
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
// Check that number of files does not grow when we are out of space
-TEST(DBTest, NoSpace) {
+TEST_F(DBTest, NoSpace) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
@@ -1735,18 +1794,18 @@ TEST(DBTest, NoSpace) {
ASSERT_LT(CountFiles(), num_files + 3);
}
-TEST(DBTest, NonWritableFileSystem) {
+TEST_F(DBTest, NonWritableFileSystem) {
Options options = CurrentOptions();
options.write_buffer_size = 1000;
options.env = env_;
Reopen(&options);
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
// Force errors for new files.
env_->non_writable_.store(true, std::memory_order_release);
std::string big(100000, 'x');
int errors = 0;
for (int i = 0; i < 20; i++) {
- fprintf(stderr, "iter %d; errors %d\n", i, errors);
+ std::fprintf(stderr, "iter %d; errors %d\n", i, errors);
if (!Put("foo", big).ok()) {
errors++;
DelayMilliseconds(100);
@@ -1756,7 +1815,7 @@ TEST(DBTest, NonWritableFileSystem) {
env_->non_writable_.store(false, std::memory_order_release);
}
-TEST(DBTest, WriteSyncError) {
+TEST_F(DBTest, WriteSyncError) {
// Check that log sync errors cause the DB to disallow future writes.
// (a) Cause log sync calls to fail
@@ -1767,7 +1826,7 @@ TEST(DBTest, WriteSyncError) {
// (b) Normal write should succeed
WriteOptions w;
- ASSERT_OK(db_->Put(w, "k1", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1"));
ASSERT_EQ("v1", Get("k1"));
// (c) Do a sync write; should fail
@@ -1787,7 +1846,7 @@ TEST(DBTest, WriteSyncError) {
ASSERT_EQ("NOT_FOUND", Get("k3"));
}
-TEST(DBTest, ManifestWriteError) {
+TEST_F(DBTest, ManifestWriteError) {
// Test for the following problem:
// (a) Compaction produces file F
// (b) Log record containing F is written to MANIFEST file, but Sync() fails
@@ -1806,7 +1865,7 @@ TEST(DBTest, ManifestWriteError) {
options.create_if_missing = true;
options.error_if_exists = false;
DestroyAndReopen(&options);
- ASSERT_OK(Put("foo", "bar"));
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
// Memtable compaction (will succeed)
@@ -1827,8 +1886,8 @@ TEST(DBTest, ManifestWriteError) {
}
}
-TEST(DBTest, MissingSSTFile) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(DBTest, MissingSSTFile) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
// Dump the memtable to disk.
@@ -1844,8 +1903,8 @@ TEST(DBTest, MissingSSTFile) {
ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
-TEST(DBTest, StillReadSST) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(DBTest, StillReadSST) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
// Dump the memtable to disk.
@@ -1860,18 +1919,18 @@ TEST(DBTest, StillReadSST) {
ASSERT_EQ("bar", Get("foo"));
}
-TEST(DBTest, FilesDeletedAfterCompaction) {
- ASSERT_OK(Put("foo", "v2"));
+TEST_F(DBTest, FilesDeletedAfterCompaction) {
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Compact("a", "z");
const int num_files = CountFiles();
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Compact("a", "z");
}
ASSERT_EQ(CountFiles(), num_files);
}
-TEST(DBTest, BloomFilter) {
+TEST_F(DBTest, BloomFilter) {
env_->count_random_reads_ = true;
Options options = CurrentOptions();
options.env = env_;
@@ -1882,11 +1941,11 @@ TEST(DBTest, BloomFilter) {
// Populate multiple layers
const int N = 10000;
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), Key(i)));
+ ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
}
Compact("a", "z");
for (int i = 0; i < N; i += 100) {
- ASSERT_OK(Put(Key(i), Key(i)));
+ ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
}
dbfull()->TEST_CompactMemTable();
@@ -1899,7 +1958,7 @@ TEST(DBTest, BloomFilter) {
ASSERT_EQ(Key(i), Get(Key(i)));
}
int reads = env_->random_read_counter_.Read();
- fprintf(stderr, "%d present => %d reads\n", N, reads);
+ std::fprintf(stderr, "%d present => %d reads\n", N, reads);
ASSERT_GE(reads, N);
ASSERT_LE(reads, N + 2 * N / 100);
@@ -1909,7 +1968,7 @@ TEST(DBTest, BloomFilter) {
ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
}
reads = env_->random_read_counter_.Read();
- fprintf(stderr, "%d missing => %d reads\n", N, reads);
+ std::fprintf(stderr, "%d missing => %d reads\n", N, reads);
ASSERT_LE(reads, 3 * N / 100);
env_->delay_data_sync_.store(false, std::memory_order_release);
@@ -1918,6 +1977,33 @@ TEST(DBTest, BloomFilter) {
delete options.filter_policy;
}
+TEST_F(DBTest, LogCloseError) {
+ // Regression test for bug where we could ignore log file
+ // Close() error when switching to a new log file.
+ const int kValueSize = 20000;
+ const int kWriteCount = 10;
+ const int kWriteBufferSize = (kValueSize * kWriteCount) / 2;
+
+ Options options = CurrentOptions();
+ options.env = env_;
+ options.write_buffer_size = kWriteBufferSize; // Small write buffer
+ Reopen(&options);
+ env_->log_file_close_.store(true, std::memory_order_release);
+
+ std::string value(kValueSize, 'x');
+ Status s;
+ for (int i = 0; i < kWriteCount && s.ok(); i++) {
+ s = Put(Key(i), value);
+ }
+ ASSERT_TRUE(!s.ok()) << "succeeded even after log file Close failure";
+
+ // Future writes should also fail after an earlier error.
+ s = Put("hello", "world");
+ ASSERT_TRUE(!s.ok()) << "write succeeded after log file Close failure";
+
+ env_->log_file_close_.store(false, std::memory_order_release);
+}
+
// Multi-threaded test:
namespace {
@@ -1942,7 +2028,7 @@ static void MTThreadBody(void* arg) {
int id = t->id;
DB* db = t->state->test->db_;
int counter = 0;
- fprintf(stderr, "... starting thread %d\n", id);
+ std::fprintf(stderr, "... starting thread %d\n", id);
Random rnd(1000 + id);
std::string value;
char valbuf[1500];
@@ -1951,14 +2037,14 @@ static void MTThreadBody(void* arg) {
int key = rnd.Uniform(kNumKeys);
char keybuf[20];
- snprintf(keybuf, sizeof(keybuf), "%016d", key);
+ std::snprintf(keybuf, sizeof(keybuf), "%016d", key);
if (rnd.OneIn(2)) {
// Write values of the form <key, my id, counter>.
// We add some padding for force compactions.
- snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
- static_cast<int>(counter));
- ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
+ std::snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+ static_cast<int>(counter));
+ ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
} else {
// Read a value and verify that it matches the pattern written above.
Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
@@ -1966,7 +2052,7 @@ static void MTThreadBody(void* arg) {
// Key has not yet been written
} else {
// Check that the writer thread counter is >= the counter in the value
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
int k, w, c;
ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
ASSERT_EQ(k, key);
@@ -1978,12 +2064,12 @@ static void MTThreadBody(void* arg) {
counter++;
}
t->state->thread_done[id].store(true, std::memory_order_release);
- fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
+ std::fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
} // namespace
-TEST(DBTest, MultiThreaded) {
+TEST_F(DBTest, MultiThreaded) {
do {
// Initialize state
MTState mt;
@@ -2128,40 +2214,82 @@ static bool CompareIterators(int step, DB* model, DB* db,
Iterator* dbiter = db->NewIterator(options);
bool ok = true;
int count = 0;
+ std::vector seek_keys;
+ // Compare equality of all elements using Next(). Save some of the keys for
+ // comparing Seek equality.
for (miter->SeekToFirst(), dbiter->SeekToFirst();
ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
count++;
if (miter->key().compare(dbiter->key()) != 0) {
- fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
- EscapeString(miter->key()).c_str(),
- EscapeString(dbiter->key()).c_str());
+ std::fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
+ EscapeString(miter->key()).c_str(),
+ EscapeString(dbiter->key()).c_str());
ok = false;
break;
}
if (miter->value().compare(dbiter->value()) != 0) {
- fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
- step, EscapeString(miter->key()).c_str(),
- EscapeString(miter->value()).c_str(),
- EscapeString(miter->value()).c_str());
+ std::fprintf(stderr,
+ "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
+ step, EscapeString(miter->key()).c_str(),
+ EscapeString(miter->value()).c_str(),
+ EscapeString(miter->value()).c_str());
ok = false;
+ break;
+ }
+
+ if (count % 10 == 0) {
+ seek_keys.push_back(miter->key().ToString());
}
}
if (ok) {
if (miter->Valid() != dbiter->Valid()) {
- fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
- step, miter->Valid(), dbiter->Valid());
+ std::fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
+ step, miter->Valid(), dbiter->Valid());
ok = false;
}
}
- fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
+
+ if (ok) {
+ // Validate iterator equality when performing seeks.
+ for (auto kiter = seek_keys.begin(); ok && kiter != seek_keys.end();
+ ++kiter) {
+ miter->Seek(*kiter);
+ dbiter->Seek(*kiter);
+ if (!miter->Valid() || !dbiter->Valid()) {
+ std::fprintf(stderr, "step %d: Seek iterators invalid: %d vs. %d\n",
+ step, miter->Valid(), dbiter->Valid());
+ ok = false;
+ }
+ if (miter->key().compare(dbiter->key()) != 0) {
+ std::fprintf(stderr, "step %d: Seek key mismatch: '%s' vs. '%s'\n",
+ step, EscapeString(miter->key()).c_str(),
+ EscapeString(dbiter->key()).c_str());
+ ok = false;
+ break;
+ }
+
+ if (miter->value().compare(dbiter->value()) != 0) {
+ std::fprintf(
+ stderr,
+ "step %d: Seek value mismatch for key '%s': '%s' vs. '%s'\n", step,
+ EscapeString(miter->key()).c_str(),
+ EscapeString(miter->value()).c_str(),
+ EscapeString(miter->value()).c_str());
+ ok = false;
+ break;
+ }
+ }
+ }
+
+ std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
delete miter;
delete dbiter;
return ok;
}
-TEST(DBTest, Randomized) {
+TEST_F(DBTest, Randomized) {
Random rnd(test::RandomSeed());
do {
ModelDB model(CurrentOptions());
@@ -2171,7 +2299,7 @@ TEST(DBTest, Randomized) {
std::string k, v;
for (int step = 0; step < N; step++) {
if (step % 100 == 0) {
- fprintf(stderr, "Step %d of %d\n", step, N);
+ std::fprintf(stderr, "Step %d of %d\n", step, N);
}
// TODO(sanjay): Test Get() works
int p = rnd.Uniform(100);
@@ -2179,13 +2307,13 @@ TEST(DBTest, Randomized) {
k = RandomKey(&rnd);
v = RandomString(
&rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
- ASSERT_OK(model.Put(WriteOptions(), k, v));
- ASSERT_OK(db_->Put(WriteOptions(), k, v));
+ ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v));
} else if (p < 90) { // Delete
k = RandomKey(&rnd);
- ASSERT_OK(model.Delete(WriteOptions(), k));
- ASSERT_OK(db_->Delete(WriteOptions(), k));
+ ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k));
} else { // Multi-element batch
WriteBatch b;
@@ -2204,8 +2332,8 @@ TEST(DBTest, Randomized) {
b.Delete(k);
}
}
- ASSERT_OK(model.Write(WriteOptions(), &b));
- ASSERT_OK(db_->Write(WriteOptions(), &b));
+ ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b));
+ ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b));
}
if ((step % 100) == 0) {
@@ -2229,74 +2357,4 @@ TEST(DBTest, Randomized) {
} while (ChangeOptions());
}
-std::string MakeKey(unsigned int num) {
- char buf[30];
- snprintf(buf, sizeof(buf), "%016u", num);
- return std::string(buf);
-}
-
-void BM_LogAndApply(int iters, int num_base_files) {
- std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
- DestroyDB(dbname, Options());
-
- DB* db = nullptr;
- Options opts;
- opts.create_if_missing = true;
- Status s = DB::Open(opts, dbname, &db);
- ASSERT_OK(s);
- ASSERT_TRUE(db != nullptr);
-
- delete db;
- db = nullptr;
-
- Env* env = Env::Default();
-
- port::Mutex mu;
- MutexLock l(&mu);
-
- InternalKeyComparator cmp(BytewiseComparator());
- Options options;
- VersionSet vset(dbname, &options, nullptr, &cmp);
- bool save_manifest;
- ASSERT_OK(vset.Recover(&save_manifest));
- VersionEdit vbase;
- uint64_t fnum = 1;
- for (int i = 0; i < num_base_files; i++) {
- InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
- vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
- }
- ASSERT_OK(vset.LogAndApply(&vbase, &mu));
-
- uint64_t start_micros = env->NowMicros();
-
- for (int i = 0; i < iters; i++) {
- VersionEdit vedit;
- vedit.DeleteFile(2, fnum);
- InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
- vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
- vset.LogAndApply(&vedit, &mu);
- }
- uint64_t stop_micros = env->NowMicros();
- unsigned int us = stop_micros - start_micros;
- char buf[16];
- snprintf(buf, sizeof(buf), "%d", num_base_files);
- fprintf(stderr,
- "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf,
- iters, us, ((float)us) / iters);
-}
-
} // namespace leveldb
-
-int main(int argc, char** argv) {
- if (argc > 1 && std::string(argv[1]) == "--benchmark") {
- leveldb::BM_LogAndApply(1000, 1);
- leveldb::BM_LogAndApply(1000, 100);
- leveldb::BM_LogAndApply(1000, 10000);
- leveldb::BM_LogAndApply(100, 100000);
- return 0;
- }
-
- return leveldb::test::RunAllTests();
-}
diff --git a/src/leveldb/db/dbformat.cc b/src/leveldb/db/dbformat.cc
index 459eddf5b136..2a5749f8bbbb 100644
--- a/src/leveldb/db/dbformat.cc
+++ b/src/leveldb/db/dbformat.cc
@@ -4,8 +4,7 @@
#include "db/dbformat.h"
-#include <stdio.h>
-
+#include <cstdio>
#include <sstream>
#include "port/port.h"
@@ -127,7 +126,7 @@ LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
start_ = dst;
dst = EncodeVarint32(dst, usize + 8);
kstart_ = dst;
- memcpy(dst, user_key.data(), usize);
+ std::memcpy(dst, user_key.data(), usize);
dst += usize;
EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
dst += 8;
diff --git a/src/leveldb/db/dbformat_test.cc b/src/leveldb/db/dbformat_test.cc
index 1209369c31a0..7f3f81a5da55 100644
--- a/src/leveldb/db/dbformat_test.cc
+++ b/src/leveldb/db/dbformat_test.cc
@@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/dbformat.h"
+
+#include "gtest/gtest.h"
#include "util/logging.h"
-#include "util/testharness.h"
namespace leveldb {
@@ -41,8 +42,6 @@ static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
-class FormatTest {};
-
TEST(FormatTest, InternalKey_EncodeDecode) {
const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
const uint64_t seq[] = {1,
@@ -127,5 +126,3 @@ TEST(FormatTest, InternalKeyDebugString) {
}
} // namespace leveldb
-
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/src/leveldb/db/dumpfile.cc b/src/leveldb/db/dumpfile.cc
index 77d59003cf96..6085475fd915 100644
--- a/src/leveldb/db/dumpfile.cc
+++ b/src/leveldb/db/dumpfile.cc
@@ -4,7 +4,7 @@
#include "leveldb/dumpfile.h"
-#include <stdio.h>
+#include <cstdio>
#include "db/dbformat.h"
#include "db/filename.h"
diff --git a/src/leveldb/db/fault_injection_test.cc b/src/leveldb/db/fault_injection_test.cc
index bf705cb60f24..ef864a4f04a7 100644
--- a/src/leveldb/db/fault_injection_test.cc
+++ b/src/leveldb/db/fault_injection_test.cc
@@ -9,6 +9,7 @@
#include
Note about Ext4 Filesystems
-The preceding numbers are for an ext3 file system. Synchronous writes are much slower under ext4 (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of fsync / msync calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues fsync calls when switching to a new file.
+The preceding numbers are for an ext3 file system. Synchronous writes are much slower under ext4 (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of fsync / msync calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues fsync calls when switching to a new file.
Acknowledgements
Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided help and advice.
diff --git a/src/leveldb/doc/impl.md b/src/leveldb/doc/impl.md
index cacabb96fc70..c9bb62174673 100644
--- a/src/leveldb/doc/impl.md
+++ b/src/leveldb/doc/impl.md
@@ -1,7 +1,7 @@
## Files
The implementation of leveldb is similar in spirit to the representation of a
-single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html).
+single [Bigtable tablet (section 5.3)](https://research.google/pubs/pub27898/).
However the organization of the files that make up the representation is
somewhat different and is explained below.
@@ -166,7 +166,7 @@ So maybe even the sharding is not necessary on modern filesystems?
## Garbage collection of files
-`DeleteObsoleteFiles()` is called at the end of every compaction and at the end
+`RemoveObsoleteFiles()` is called at the end of every compaction and at the end
of recovery. It finds the names of all files in the database. It deletes all log
files that are not the current log file. It deletes all table files that are not
referenced from some level and are not the output of an active compaction.
diff --git a/src/leveldb/doc/index.md b/src/leveldb/doc/index.md
index 3d9a25805b7b..0f6d64917803 100644
--- a/src/leveldb/doc/index.md
+++ b/src/leveldb/doc/index.md
@@ -369,6 +369,7 @@ leveldb::Iterator* it = db->NewIterator(options);
for (it->SeekToFirst(); it->Valid(); it->Next()) {
...
}
+delete it;
```
### Key Layout
@@ -424,21 +425,21 @@ spaces. For example:
```c++
class CustomFilterPolicy : public leveldb::FilterPolicy {
private:
- FilterPolicy* builtin_policy_;
+ leveldb::FilterPolicy* builtin_policy_;
public:
- CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {}
+ CustomFilterPolicy() : builtin_policy_(leveldb::NewBloomFilterPolicy(10)) {}
~CustomFilterPolicy() { delete builtin_policy_; }
const char* Name() const { return "IgnoreTrailingSpacesFilter"; }
- void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+ void CreateFilter(const leveldb::Slice* keys, int n, std::string* dst) const {
// Use builtin bloom filter code after removing trailing spaces
- std::vector<Slice> trimmed(n);
+ std::vector<leveldb::Slice> trimmed(n);
for (int i = 0; i < n; i++) {
trimmed[i] = RemoveTrailingSpaces(keys[i]);
}
- return builtin_policy_->CreateFilter(&trimmed[i], n, dst);
+ builtin_policy_->CreateFilter(trimmed.data(), n, dst);
}
};
```
@@ -478,7 +479,7 @@ leveldb::Range ranges[2];
ranges[0] = leveldb::Range("a", "c");
ranges[1] = leveldb::Range("x", "z");
uint64_t sizes[2];
-leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes);
+db->GetApproximateSizes(ranges, 2, sizes);
```
The preceding call will set `sizes[0]` to the approximate number of bytes of
diff --git a/src/leveldb/helpers/memenv/memenv.cc b/src/leveldb/helpers/memenv/memenv.cc
index 47e4481f7c55..e47661330e93 100644
--- a/src/leveldb/helpers/memenv/memenv.cc
+++ b/src/leveldb/helpers/memenv/memenv.cc
@@ -4,8 +4,7 @@
#include "helpers/memenv/memenv.h"
-#include <string.h>
-
+#include <cstring>
#include <limits>
#include <map>