diff --git a/CMakeLists.txt b/CMakeLists.txt index b52ea8f4..9ec6d2c3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -45,6 +45,35 @@ function (die msg) message(FATAL_ERROR "${BoldRed}${msg}${ColourReset}") endfunction () +if (NOT ${ARCH} STREQUAL "") + string(SUBSTRING ${ARCH} 0 5 ARM_TEST) + string(TOLOWER ${ARM_TEST} ARM_TEST) + + if (${ARM_TEST} STREQUAL "armv6") + set(ARM6 1) + else() + set(ARM6 0) + endif() + + if (${ARM_TEST} STREQUAL "armv7") + set(ARM7 1) + else() + set(ARM7 0) + endif() +endif() + +if(WIN32 OR ARM7 OR ARM6) + set(CMAKE_C_FLAGS_RELEASE "-O2 -DNDEBUG") + set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG") +endif() + +# set this to 0 if per-block checkpoint needs to be disabled +set(PER_BLOCK_CHECKPOINT 1) + +if(PER_BLOCK_CHECKPOINT) + add_definitions("-DPER_BLOCK_CHECKPOINT") +endif() + list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_SOURCE_DIR}/cmake") @@ -156,14 +185,42 @@ if (DEFINED ENV{DATABASE}) else() message(STATUS "Could not find DATABASE in env (not required unless you want to change database type from default: ${DATABASE})") endif() + +set(BERKELEY_DB 0) if (DATABASE STREQUAL "lmdb") set(BLOCKCHAIN_DB DB_LMDB) + + # temporarily allow mingw to compile with berkeley_db, + # regardless if building static or not + if(NOT STATIC OR MINGW) + find_package(BerkeleyDB) + + if(NOT BERKELEY_DB_LIBRARIES) + message(STATUS "BerkeleyDB not found and has been disabled.") + else() + message(STATUS "Found BerkeleyDB include (db.h) in ${BERKELEY_DB_INCLUDE_DIR}") + if(BERKELEY_DB_LIBRARIES) + message(STATUS "Found BerkeleyDB shared library") + set(BDB_STATIC false CACHE BOOL "BDB Static flag") + set(BDB_INCLUDE ${BERKELEY_DB_INCLUDE_DIR} CACHE STRING "BDB include path") + set(BDB_LIBRARY ${BERKELEY_DB_LIBRARIES} CACHE STRING "BDB library name") + set(BDB_LIBRARY_DIRS "" CACHE STRING "BDB Library dirs") + set(BERKELEY_DB 1) + else() + message(STATUS "Found BerkeleyDB includes, but could not find BerkeleyDB library. 
Please make sure you have installed libdb and libdb-dev or the equivalent") + endif() + endif() + endif() elseif (DATABASE STREQUAL "memory") set(BLOCKCHAIN_DB DB_MEMORY) else() die("Invalid database type: ${DATABASE}") endif() +if(BERKELEY_DB) + add_definitions("-DBERKELEY_DB") +endif() + add_definitions("-DBLOCKCHAIN_DB=${BLOCKCHAIN_DB}") if (UNIX AND NOT APPLE) @@ -192,7 +249,7 @@ include_directories(external/rapidjson) include_directories(${LMDB_INCLUDE}) # Final setup for Berkeley DB -if (NOT STATIC) +if (BERKELEY_DB) include_directories(${BDB_INCLUDE}) endif() @@ -208,13 +265,14 @@ if(MSVC) include_directories(SYSTEM src/platform/msc) else() set(ARCH native CACHE STRING "CPU to build for: -march value or default") - if(ARCH STREQUAL "default") + # -march=armv7-a conflicts with -mcpu=cortex-a7 + if(ARCH STREQUAL "default" OR ARM7) set(ARCH_FLAG "") else() if(ARCH STREQUAL "x86_64") set(ARCH_FLAG "-march=x86-64") else() - set(ARCH_FLAG "-march=${ARCH}") + set(ARCH_FLAG "-march=${ARCH}") endif() endif() set(WARNINGS "-Wall -Wextra -Wpointer-arith -Wundef -Wvla -Wwrite-strings -Wno-error=extra -Wno-error=deprecated-declarations -Wno-error=sign-compare -Wno-error=strict-aliasing -Wno-error=type-limits -Wno-unused-parameter -Wno-error=unused-variable -Wno-error=undef -Wno-error=uninitialized") @@ -258,14 +316,18 @@ else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_GNU_SOURCE ${MINGW_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${ARCH_FLAG} -maes") endif() - string(SUBSTRING ${ARCH} 0 3 ARM_TEST) - string(TOLOWER ${ARM_TEST} ARM_TEST) - if(${ARM_TEST} STREQUAL "arm") - message(STATUS "Setting ARM C and C++ flags") + if(ARM6) + message(STATUS "Setting ARM6 C and C++ flags") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp -mfloat-abi=hard") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp -mfloat-abi=hard") endif() + if(ARM7) + message(STATUS "Setting ARM7 C and C++ flags") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O2 -mcpu=cortex-a7 -mfloat-abi=hard -mfpu=vfpv4 -funsafe-math-optimizations -mtune=cortex-a7") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -mcpu=cortex-a7 -mfloat-abi=hard -mfpu=vfpv4 -funsafe-math-optimizations -mtune=cortex-a7") + endif() + if(APPLE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGTEST_HAS_TR1_TUPLE=0") endif() diff --git a/Makefile b/Makefile index 329f9c4a..dda8663b 100644 --- a/Makefile +++ b/Makefile @@ -58,6 +58,14 @@ release-all: mkdir -p build/release cd build/release && cmake -D BUILD_TESTS=ON -D CMAKE_BUILD_TYPE=release ../.. && $(MAKE) +release-arm6: + mkdir -p build/release + cd build/release && cmake -D BUILD_TESTS=OFF -D ARCH="armv6zk" -D BUILD_64=OFF -D NO_AES=ON -D CMAKE_BUILD_TYPE=release ../.. && $(MAKE) + +release-arm7: + mkdir -p build/release + cd build/release && cmake -D BUILD_TESTS=OFF -D ARCH="armv7-a" -D BUILD_64=OFF -D NO_AES=ON -D CMAKE_BUILD_TYPE=release ../.. && $(MAKE) + release-static: release-static-64 release-static-64: @@ -68,10 +76,6 @@ release-static-32: mkdir -p build/release cd build/release && cmake -D STATIC=ON -D ARCH="i686" -D BUILD_64=OFF -D CMAKE_BUILD_TYPE=release ../.. && $(MAKE) -release-static-arm6: - mkdir -p build/release - cd build/release && cmake -D STATIC=ON -D ARCH="armv6zk" -D BUILD_64=OFF -D NO_AES=ON -D CMAKE_BUILD_TYPE=release ../.. && $(MAKE) - release-static-win64: mkdir -p build/release cd build/release && cmake -G "MSYS Makefiles" -D STATIC=ON -D ARCH="x86-64" -D BUILD_64=ON -D CMAKE_BUILD_TYPE=Release -D CMAKE_TOOLCHAIN_FILE=../../cmake/64-bit-toolchain.cmake -D MSYS2_FOLDER=c:/msys64 ../.. 
&& $(MAKE) diff --git a/external/db_drivers/CMakeLists.txt b/external/db_drivers/CMakeLists.txt index cd747972..4d1b95c9 100644 --- a/external/db_drivers/CMakeLists.txt +++ b/external/db_drivers/CMakeLists.txt @@ -31,24 +31,4 @@ message(STATUS "Using ${ARCH_WIDTH}-bit LMDB from source tree") add_subdirectory(liblmdb${ARCH_WIDTH}) set(LMDB_INCLUDE "${CMAKE_CURRENT_SOURCE_DIR}/liblmdb${ARCH_WIDTH}" CACHE STRING "LMDB Include path") - set(LMDB_LIBRARY "lmdb" CACHE STRING "LMDB Library name") - -if (NOT STATIC) - find_package(BerkeleyDB) - - if(NOT BERKELEY_DB_LIBRARIES) - die("BerkeleyDB not found. At this time it should be installed in your system for a non-static build.") - else() - message(STATUS "Found BerkeleyDB include (db.h) in ${BERKELEY_DB_INCLUDE_DIR}") - if(BERKELEY_DB_LIBRARIES) - message(STATUS "Found BerkeleyDB shared library") - set(BDB_STATIC false CACHE BOOL "BDB Static flag") - set(BDB_INCLUDE ${BERKELEY_DB_INCLUDE_DIR} CACHE STRING "BDB include path") - set(BDB_LIBRARY ${BERKELEY_DB_LIBRARIES} CACHE STRING "BDB library name") - set(BDB_LIBRARY_DIRS "" CACHE STRING "BDB Library dirs") - else() - die("Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev or the equivalent") - endif() - endif() -endif() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c81199e0..34c52919 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -103,3 +103,7 @@ add_subdirectory(daemonizer) add_subdirectory(daemon) add_subdirectory(blockchain_utilities) + +if(PER_BLOCK_CHECKPOINT) + add_subdirectory(blocks) +endif() diff --git a/src/blockchain_db/CMakeLists.txt b/src/blockchain_db/CMakeLists.txt index adbe804a..7301cab8 100644 --- a/src/blockchain_db/CMakeLists.txt +++ b/src/blockchain_db/CMakeLists.txt @@ -31,7 +31,7 @@ set(blockchain_db_sources lmdb/db_lmdb.cpp ) -if (NOT STATIC) +if (BERKELEY_DB) set(blockchain_db_sources ${blockchain_db_sources} berkeleydb/db_bdb.cpp @@ -46,7 +46,7 @@ set(blockchain_db_private_headers lmdb/db_lmdb.h ) -if (NOT STATIC) +if (BERKELEY_DB) set(blockchain_db_private_headers ${blockchain_db_private_headers} berkeleydb/db_bdb.h diff --git a/src/blockchain_db/berkeleydb/db_bdb.cpp b/src/blockchain_db/berkeleydb/db_bdb.cpp index a8163d4b..ad3febbd 100644 --- a/src/blockchain_db/berkeleydb/db_bdb.cpp +++ b/src/blockchain_db/berkeleydb/db_bdb.cpp @@ -36,6 +36,7 @@ #include "profile_tools.h" using epee::string_tools::pod_to_hex; +#define DB_DEFAULT_TX (m_write_txn != nullptr ? *m_write_txn : (DbTxn*) nullptr) namespace { @@ -117,12 +118,24 @@ const char* const BDB_OUTPUT_KEYS = "output_keys"; const char* const BDB_SPENT_KEYS = "spent_keys"; -const int BUFFER_LENGTH = 4 * 1024 * 1024; +const unsigned int MB = 1024 * 1024; +// ND: FIXME: db keeps running out of locks when doing full syncs. Possible bug??? Set it to 5K for now. 
+const unsigned int DB_MAX_LOCKS = 5000; +const unsigned int DB_BUFFER_LENGTH = 32 * MB; +// 256MB cache adjust as necessary using DB_CONFIG +const unsigned int DB_DEF_CACHESIZE = 256 * MB; + +#if defined(BDB_BULK_CAN_THREAD) +const unsigned int DB_BUFFER_COUNT = std::thread::hardware_concurrency(); +#else +const unsigned int DB_BUFFER_COUNT = 1; +#endif template struct Dbt_copy: public Dbt { - Dbt_copy(const T &t): t_copy(t) + Dbt_copy(const T &t) : + t_copy(t) { init(); } @@ -151,7 +164,8 @@ private: template<> struct Dbt_copy: public Dbt { - Dbt_copy(const cryptonote::blobdata &bd) : m_data(new char[bd.size()]) + Dbt_copy(const cryptonote::blobdata &bd) : + m_data(new char[bd.size()]) { memcpy(m_data.get(), bd.data(), bd.size()); set_data(m_data.get()); @@ -185,25 +199,20 @@ struct Dbt_safe : public Dbt namespace cryptonote { -void BlockchainBDB::add_block( const block& blk - , const size_t& block_size - , const difficulty_type& cumulative_difficulty - , const uint64_t& coins_generated - , const crypto::hash& blk_hash - ) +void BlockchainBDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, const crypto::hash& blk_hash) { LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); Dbt_copy val_h(blk_hash); - if (m_block_heights->exists(*m_write_txn, &val_h, 0) == 0) + if (m_block_heights->exists(DB_DEFAULT_TX, &val_h, 0) == 0) throw1(BLOCK_EXISTS("Attempting to add block that's already in the db")); if (m_height > 0) { Dbt_copy parent_key(blk.prev_id); Dbt_copy parent_h; - if (m_block_heights->get(*m_write_txn, &parent_key, &parent_h, 0)) + if (m_block_heights->get(DB_DEFAULT_TX, &parent_key, &parent_h, 0)) { LOG_PRINT_L3("m_height: " << m_height); LOG_PRINT_L3("parent_key: " << blk.prev_id); @@ -217,30 +226,30 @@ void BlockchainBDB::add_block( const block& blk Dbt_copy key(m_height + 1); Dbt_copy blob(block_to_blob(blk)); - auto res = m_blocks->put(*m_write_txn, &key, &blob, 0); + auto res = m_blocks->put(DB_DEFAULT_TX, &key, &blob, 0); if (res) throw0(DB_ERROR("Failed to add block blob to db transaction.")); Dbt_copy sz(block_size); - if (m_block_sizes->put(*m_write_txn, &key, &sz, 0)) + if (m_block_sizes->put(DB_DEFAULT_TX, &key, &sz, 0)) throw0(DB_ERROR("Failed to add block size to db transaction.")); Dbt_copy ts(blk.timestamp); - if (m_block_timestamps->put(*m_write_txn, &key, &ts, 0)) + if (m_block_timestamps->put(DB_DEFAULT_TX, &key, &ts, 0)) throw0(DB_ERROR("Failed to add block timestamp to db transaction.")); Dbt_copy diff(cumulative_difficulty); - if (m_block_diffs->put(*m_write_txn, &key, &diff, 0)) + if (m_block_diffs->put(DB_DEFAULT_TX, &key, &diff, 0)) throw0(DB_ERROR("Failed to add block cumulative difficulty to db transaction.")); Dbt_copy coinsgen(coins_generated); - if (m_block_coins->put(*m_write_txn, &key, &coinsgen, 0)) + if (m_block_coins->put(DB_DEFAULT_TX, &key, &coinsgen, 0)) throw0(DB_ERROR("Failed to add block total generated coins to db transaction.")); - if (m_block_heights->put(*m_write_txn, &val_h, &key, 0)) + if (m_block_heights->put(DB_DEFAULT_TX, &val_h, &key, 0)) throw0(DB_ERROR("Failed to add block height by hash to db transaction.")); - if (m_block_hashes->put(*m_write_txn, &key, &val_h, 0)) + if (m_block_hashes->put(DB_DEFAULT_TX, &key, &val_h, 0)) throw0(DB_ERROR("Failed to add block hash to db transaction.")); } @@ -254,28 +263,28 @@ void BlockchainBDB::remove_block() Dbt_copy k(m_height); Dbt_copy h; - if (m_block_hashes->get(*m_write_txn, &k, &h, 0)) + if 
(m_block_hashes->get(DB_DEFAULT_TX, &k, &h, 0)) throw1(BLOCK_DNE("Attempting to remove block that's not in the db")); - if (m_blocks->del(*m_write_txn, &k, 0)) + if (m_blocks->del(DB_DEFAULT_TX, &k, 0)) throw1(DB_ERROR("Failed to add removal of block to db transaction")); - if (m_block_sizes->del(*m_write_txn, &k, 0)) + if (m_block_sizes->del(DB_DEFAULT_TX, &k, 0)) throw1(DB_ERROR("Failed to add removal of block size to db transaction")); - if (m_block_diffs->del(*m_write_txn, &k, 0)) + if (m_block_diffs->del(DB_DEFAULT_TX, &k, 0)) throw1(DB_ERROR("Failed to add removal of block cumulative difficulty to db transaction")); - if (m_block_coins->del(*m_write_txn, &k, 0)) + if (m_block_coins->del(DB_DEFAULT_TX, &k, 0)) throw1(DB_ERROR("Failed to add removal of block total generated coins to db transaction")); - if (m_block_timestamps->del(*m_write_txn, &k, 0)) + if (m_block_timestamps->del(DB_DEFAULT_TX, &k, 0)) throw1(DB_ERROR("Failed to add removal of block timestamp to db transaction")); - if (m_block_heights->del(*m_write_txn, &h, 0)) + if (m_block_heights->del(DB_DEFAULT_TX, &h, 0)) throw1(DB_ERROR("Failed to add removal of block height by hash to db transaction")); - if (m_block_hashes->del(*m_write_txn, &k, 0)) + if (m_block_hashes->del(DB_DEFAULT_TX, &k, 0)) throw1(DB_ERROR("Failed to add removal of block hash to db transaction")); } @@ -286,19 +295,19 @@ void BlockchainBDB::add_transaction_data(const crypto::hash& blk_hash, const tra Dbt_copy val_h(tx_hash); - if (m_txs->exists(*m_write_txn, &val_h, 0) == 0) + if (m_txs->exists(DB_DEFAULT_TX, &val_h, 0) == 0) throw1(TX_EXISTS("Attempting to add transaction that's already in the db")); Dbt_copy blob(tx_to_blob(tx)); - if (m_txs->put(*m_write_txn, &val_h, &blob, 0)) + if (m_txs->put(DB_DEFAULT_TX, &val_h, &blob, 0)) throw0(DB_ERROR("Failed to add tx blob to db transaction")); Dbt_copy height(m_height + 1); - if (m_tx_heights->put(*m_write_txn, &val_h, &height, 0)) + if (m_tx_heights->put(DB_DEFAULT_TX, &val_h, &height, 0)) throw0(DB_ERROR("Failed to add tx block height to db transaction")); Dbt_copy unlock_time(tx.unlock_time); - if (m_tx_unlocks->put(*m_write_txn, &val_h, &unlock_time, 0)) + if (m_tx_unlocks->put(DB_DEFAULT_TX, &val_h, &unlock_time, 0)) throw0(DB_ERROR("Failed to add tx unlock time to db transaction")); } @@ -308,24 +317,24 @@ void BlockchainBDB::remove_transaction_data(const crypto::hash& tx_hash, const t check_open(); Dbt_copy val_h(tx_hash); - if (m_txs->exists(*m_write_txn, &val_h, 0)) + if (m_txs->exists(DB_DEFAULT_TX, &val_h, 0)) throw1(TX_DNE("Attempting to remove transaction that isn't in the db")); - if (m_txs->del(*m_write_txn, &val_h, 0)) + if (m_txs->del(DB_DEFAULT_TX, &val_h, 0)) throw1(DB_ERROR("Failed to add removal of tx to db transaction")); - if (m_tx_unlocks->del(*m_write_txn, &val_h, 0)) + if (m_tx_unlocks->del(DB_DEFAULT_TX, &val_h, 0)) throw1(DB_ERROR("Failed to add removal of tx unlock time to db transaction")); - if (m_tx_heights->del(*m_write_txn, &val_h, 0)) + if (m_tx_heights->del(DB_DEFAULT_TX, &val_h, 0)) throw1(DB_ERROR("Failed to add removal of tx block height to db transaction")); remove_tx_outputs(tx_hash, tx); - if (m_tx_outputs->del(*m_write_txn, &val_h, 0)) + if (m_tx_outputs->del(DB_DEFAULT_TX, &val_h, 0)) throw1(DB_ERROR("Failed to add removal of tx outputs to db transaction")); } -void BlockchainBDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index) +void BlockchainBDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, 
const uint64_t& local_index, const uint64_t unlock_time) { LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); @@ -333,23 +342,28 @@ void BlockchainBDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_out Dbt_copy k(m_num_outputs + 1); Dbt_copy v(tx_hash); - if (m_output_txs->put(*m_write_txn, &k, &v, 0)) + if (m_output_txs->put(DB_DEFAULT_TX, &k, &v, 0)) throw0(DB_ERROR("Failed to add output tx hash to db transaction")); - if (m_tx_outputs->put(*m_write_txn, &v, &k, 0)) + if (m_tx_outputs->put(DB_DEFAULT_TX, &v, &k, 0)) throw0(DB_ERROR("Failed to add tx output index to db transaction")); Dbt_copy val_local_index(local_index); - if (m_output_indices->put(*m_write_txn, &k, &val_local_index, 0)) + if (m_output_indices->put(DB_DEFAULT_TX, &k, &val_local_index, 0)) throw0(DB_ERROR("Failed to add tx output index to db transaction")); Dbt_copy val_amount(tx_output.amount); - if (m_output_amounts->put(*m_write_txn, &val_amount, &k, 0)) + if (m_output_amounts->put(DB_DEFAULT_TX, &val_amount, &k, 0)) throw0(DB_ERROR("Failed to add output amount to db transaction.")); if (tx_output.target.type() == typeid(txout_to_key)) { - Dbt_copy val_pubkey(boost::get(tx_output.target).key); - if (m_output_keys->put(*m_write_txn, &k, &val_pubkey, 0)) + output_data_t od; + od.pubkey = boost::get < txout_to_key > (tx_output.target).key; + od.unlock_time = unlock_time; + od.height = m_height; + + Dbt_copy data(od); + if (m_output_keys->put(DB_DEFAULT_TX, &k, &data, 0)) throw0(DB_ERROR("Failed to add output pubkey to db transaction")); } @@ -360,7 +374,7 @@ void BlockchainBDB::remove_tx_outputs(const crypto::hash& tx_hash, const transac { LOG_PRINT_L3("BlockchainBDB::" << __func__); - bdb_cur cur(*m_write_txn, m_tx_outputs); + bdb_cur cur(DB_DEFAULT_TX, m_tx_outputs); Dbt_copy k(tx_hash); Dbt_copy v; @@ -407,7 +421,7 @@ void BlockchainBDB::remove_output(const uint64_t& out_index, const uint64_t amou Dbt_copy k(out_index); - auto result = m_output_indices->del(*m_write_txn, &k, 0); + auto result = m_output_indices->del(DB_DEFAULT_TX, &k, 0); if (result == DB_NOTFOUND) { LOG_PRINT_L0("Unexpected: global output index not found in m_output_indices"); @@ -417,7 +431,7 @@ void BlockchainBDB::remove_output(const uint64_t& out_index, const uint64_t amou throw1(DB_ERROR("Error adding removal of output tx index to db transaction")); } - result = m_output_txs->del(*m_write_txn, &k, 0); + result = m_output_txs->del(DB_DEFAULT_TX, &k, 0); // if (result != 0 && result != DB_NOTFOUND) // throw1(DB_ERROR("Error adding removal of output tx hash to db transaction")); if (result == DB_NOTFOUND) @@ -429,7 +443,7 @@ void BlockchainBDB::remove_output(const uint64_t& out_index, const uint64_t amou throw1(DB_ERROR("Error adding removal of output tx hash to db transaction")); } - result = m_output_keys->del(*m_write_txn, &k, 0); + result = m_output_keys->del(DB_DEFAULT_TX, &k, 0); if (result == DB_NOTFOUND) { LOG_PRINT_L0("Unexpected: global output index not found in m_output_keys"); @@ -447,7 +461,7 @@ void BlockchainBDB::remove_amount_output_index(const uint64_t amount, const uint LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_cur cur(*m_write_txn, m_output_amounts); + bdb_cur cur(DB_DEFAULT_TX, m_output_amounts); Dbt_copy k(amount); Dbt_copy v; @@ -497,11 +511,11 @@ void BlockchainBDB::add_spent_key(const crypto::key_image& k_image) check_open(); Dbt_copy val_key(k_image); - if (m_spent_keys->exists(*m_write_txn, &val_key, 0) == 0) + if (m_spent_keys->exists(DB_DEFAULT_TX, &val_key, 0) == 0) 
throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db")); Dbt_copy val('\0'); - if (m_spent_keys->put(*m_write_txn, &val_key, &val, 0)) + if (m_spent_keys->put(DB_DEFAULT_TX, &val_key, &val, 0)) throw1(DB_ERROR("Error adding spent key image to db transaction.")); } @@ -511,7 +525,7 @@ void BlockchainBDB::remove_spent_key(const crypto::key_image& k_image) check_open(); Dbt_copy k(k_image); - auto result = m_spent_keys->del(*m_write_txn, &k, 0); + auto result = m_spent_keys->del(DB_DEFAULT_TX, &k, 0); if (result != 0 && result != DB_NOTFOUND) throw1(DB_ERROR("Error adding removal of key image to db transaction")); } @@ -539,44 +553,19 @@ tx_out BlockchainBDB::output_from_blob(const blobdata& blob) const return o; } -uint64_t BlockchainBDB::get_output_global_index(const uint64_t& amount, const uint64_t& index) const +uint64_t BlockchainBDB::get_output_global_index(const uint64_t& amount, const uint64_t& index) { - LOG_PRINT_L3("BlockchainLMDB::" << __func__); + LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - - bdb_cur cur(txn, m_output_amounts); - - Dbt_copy k(amount); - Dbt_copy v; - - auto result = cur->get(&k, &v, DB_SET); - if (result == DB_NOTFOUND) + std::vector < uint64_t > offsets; + std::vector < uint64_t > global_indices; + offsets.push_back(index); + get_output_global_indices(amount, offsets, global_indices); + if (!global_indices.size()) throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR("DB error attempting to get an output")); - db_recno_t num_elems; - cur->count(&num_elems, 0); - - if (num_elems <= index) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); - - for (uint64_t i = 0; i < index; ++i) - { - cur->get(&k, &v, DB_NEXT_DUP); - } - - uint64_t glob_index = v; - - cur.close(); - - txn.commit(); - - return glob_index; + return global_indices[0]; } void BlockchainBDB::check_open() const @@ -589,24 +578,22 @@ void BlockchainBDB::check_open() const BlockchainBDB::~BlockchainBDB() { LOG_PRINT_L3("BlockchainBDB::" << __func__); - if(m_buffer != NULL) - free(m_buffer); - m_buffer = NULL; + if (m_open) { close(); } } -BlockchainBDB::BlockchainBDB(bool batch_transactions) +BlockchainBDB::BlockchainBDB(bool batch_transactions) : + m_buffer(DB_BUFFER_COUNT, DB_BUFFER_LENGTH) { - m_buffer = malloc(BUFFER_LENGTH); LOG_PRINT_L3("BlockchainBDB::" << __func__); // initialize folder to something "safe" just in case // someone accidentally misuses this class... m_folder = "thishsouldnotexistbecauseitisgibberish"; m_open = false; - + m_run_checkpoint = 0; m_batch_transactions = batch_transactions; m_write_txn = nullptr; m_height = 0; @@ -639,12 +626,21 @@ void BlockchainBDB::open(const std::string& filename, const int db_flags) //Create BerkeleyDB environment m_env = new DbEnv(0); // no flags needed for DbEnv - uint32_t db_env_open_flags = DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK - | DB_INIT_LOG | DB_INIT_TXN | DB_RECOVER - | DB_THREAD; + uint32_t db_env_open_flags = DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_RECOVER | DB_THREAD; + // Set some default values for these parameters. + // They can be overridden using the DB_CONFIG file. 
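// As an illustration only (not part of this patch): the same limits can be overridden
// without rebuilding by placing a DB_CONFIG file in the database environment directory,
// which the comment above refers to. For example, a 256MB cache and 10000 locks:
//   set_cachesize 0 268435456 1
//   set_lk_max_locks 10000
//   set_lk_max_lockers 10000
//   set_lk_max_objects 10000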
+ m_env->set_cachesize(0, DB_DEF_CACHESIZE, 1); + m_env->set_lk_max_locks(DB_MAX_LOCKS); + m_env->set_lk_max_lockers(DB_MAX_LOCKS); + m_env->set_lk_max_objects(DB_MAX_LOCKS); + // last parameter left 0, files will be created with default rw access m_env->open(filename.c_str(), db_env_open_flags, 0); + m_env->set_flags(db_flags, 1); + + if(m_auto_remove_logs) + m_env->log_set_config(DB_LOG_AUTO_REMOVE, 1); // begin transaction to init dbs bdb_txn_safe txn; @@ -686,7 +682,7 @@ void BlockchainBDB::open(const std::string& filename, const int db_flags) m_output_txs->set_re_len(sizeof(crypto::hash)); m_output_indices->set_re_len(sizeof(uint64_t)); - m_output_keys->set_re_len(sizeof(crypto::public_key)); + m_output_keys->set_re_len(sizeof(output_data_t)); //TODO: Find out if we need to do Db::set_flags(DB_RENUMBER) // for the RECNO databases. We shouldn't as we're only @@ -717,21 +713,29 @@ void BlockchainBDB::open(const std::string& filename, const int db_flags) m_spent_keys->open(txn, BDB_SPENT_KEYS, NULL, DB_HASH, DB_CREATE, 0); + txn.commit(); + DB_BTREE_STAT* stats; // DB_FAST_STAT can apparently cause an incorrect number of records // to be returned. The flag should be set to 0 instead if this proves // to be the case. - m_blocks->stat(txn, &stats, DB_FAST_STAT); + + // ND: The bug above can occur when a block is popped and the application + // exits without pushing a new block to the db. Set txn to NULL and DB_FAST_STAT + // to zero (0) for reliability. + m_blocks->stat(NULL, &stats, 0); m_height = stats->bt_nkeys; delete stats; // see above comment about DB_FAST_STAT - m_output_indices->stat(txn, &stats, DB_FAST_STAT); + m_output_indices->stat(NULL, &stats, 0); m_num_outputs = stats->bt_nkeys; delete stats; - txn.commit(); + // run checkpoint thread + m_run_checkpoint = true; + m_checkpoint_thread.reset(new boost::thread(&BlockchainBDB::checkpoint_worker, this)); } catch (const std::exception& e) { @@ -746,6 +750,10 @@ void BlockchainBDB::close() LOG_PRINT_L3("BlockchainBDB::" << __func__); this->sync(); + m_run_checkpoint = false; + m_checkpoint_thread->join(); + m_checkpoint_thread.reset(); + // FIXME: not yet thread safe!!! Use with care. 
m_open = false; m_env->close(DB_FORCESYNC); @@ -786,7 +794,7 @@ void BlockchainBDB::sync() void BlockchainBDB::reset() { - LOG_PRINT_L3("BlockchainLMDB::" << __func__); + LOG_PRINT_L3("BlockchainBDB::" << __func__); // TODO: this } @@ -853,7 +861,7 @@ std::vector BlockchainBDB::get_filenames() const std::vector full_paths; -for (auto& filename : filenames) + for (auto& filename : filenames) { boost::filesystem::path p(m_folder); p /= filename; @@ -890,23 +898,17 @@ bool BlockchainBDB::block_exists(const crypto::hash& h) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(h); - auto get_result = m_block_heights->exists(txn, &key, 0); + auto get_result = m_block_heights->exists(DB_DEFAULT_TX, &key, 0); if (get_result == DB_NOTFOUND) { - txn.commit(); LOG_PRINT_L3("Block with hash " << epee::string_tools::pod_to_hex(h) << " not found in db"); return false; } else if (get_result) throw0(DB_ERROR("DB error attempting to fetch block index from hash")); - txn.commit(); return true; } @@ -923,22 +925,16 @@ uint64_t BlockchainBDB::get_block_height(const crypto::hash& h) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(h); - Dbt_copy result; + Dbt_copy result; - auto get_result = m_block_heights->get(txn, &key, &result, 0); + auto get_result = m_block_heights->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) throw1(BLOCK_DNE("Attempted to retrieve non-existent block height")); else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a block height from the db")); - txn.commit(); - - return (uint64_t)result - 1; + return result - 1; } block_header BlockchainBDB::get_block_header(const crypto::hash& h) const @@ -955,13 +951,9 @@ block BlockchainBDB::get_block_from_height(const uint64_t& height) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(height + 1); Dbt_safe result; - auto get_result = m_blocks->get(txn, &key, &result, 0); + auto get_result = m_blocks->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw0(DB_ERROR(std::string("Attempt to get block from height ").append(boost::lexical_cast(height)).append(" failed -- block not in db").c_str())); @@ -969,8 +961,6 @@ block BlockchainBDB::get_block_from_height(const uint64_t& height) const else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a block from the db")); - txn.commit(); - blobdata bd; bd.assign(reinterpret_cast(result.get_data()), result.get_size()); @@ -986,13 +976,9 @@ uint64_t BlockchainBDB::get_block_timestamp(const uint64_t& height) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(height + 1); Dbt_copy result; - auto get_result = m_block_timestamps->get(txn, &key, &result, 0); + auto get_result = m_block_timestamps->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw0(DB_ERROR(std::string("Attempt to get timestamp from height ").append(boost::lexical_cast(height)).append(" failed -- timestamp not in db").c_str())); @@ -1000,7 +986,6 @@ 
uint64_t BlockchainBDB::get_block_timestamp(const uint64_t& height) const else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a timestamp from the db")); - txn.commit(); return result; } @@ -1023,13 +1008,9 @@ size_t BlockchainBDB::get_block_size(const uint64_t& height) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(height + 1); Dbt_copy result; - auto get_result = m_block_sizes->get(txn, &key, &result, 0); + auto get_result = m_block_sizes->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw0(DB_ERROR(std::string("Attempt to get block size from height ").append(boost::lexical_cast(height)).append(" failed -- block size not in db").c_str())); @@ -1037,7 +1018,6 @@ size_t BlockchainBDB::get_block_size(const uint64_t& height) const else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a block size from the db")); - txn.commit(); return result; } @@ -1046,13 +1026,9 @@ difficulty_type BlockchainBDB::get_block_cumulative_difficulty(const uint64_t& h LOG_PRINT_L3("BlockchainBDB::" << __func__ << " height: " << height); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(height + 1); Dbt_copy result; - auto get_result = m_block_diffs->get(txn, &key, &result, 0); + auto get_result = m_block_diffs->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw0(DB_ERROR(std::string("Attempt to get cumulative difficulty from height ").append(boost::lexical_cast(height)).append(" failed -- difficulty not in db").c_str())); @@ -1060,7 +1036,6 @@ difficulty_type BlockchainBDB::get_block_cumulative_difficulty(const uint64_t& h else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a cumulative difficulty from the db")); - txn.commit(); return result; } @@ -1086,13 +1061,9 @@ uint64_t BlockchainBDB::get_block_already_generated_coins(const uint64_t& height LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(height + 1); Dbt_copy result; - auto get_result = m_block_coins->get(txn, &key, &result, 0); + auto get_result = m_block_coins->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw0(DB_ERROR(std::string("Attempt to get generated coins from height ").append(boost::lexical_cast(height)).append(" failed -- block size not in db").c_str())); @@ -1100,7 +1071,6 @@ uint64_t BlockchainBDB::get_block_already_generated_coins(const uint64_t& height else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a total generated coins from the db")); - txn.commit(); return result; } @@ -1109,13 +1079,9 @@ crypto::hash BlockchainBDB::get_block_hash_from_height(const uint64_t& height) c LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(height + 1); Dbt_copy result; - auto get_result = m_block_hashes->get(txn, &key, &result, 0); + auto get_result = m_block_hashes->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw0(BLOCK_DNE(std::string("Attempt to get hash from height ").append(boost::lexical_cast(height)).append(" failed -- hash not in 
db").c_str())); @@ -1123,7 +1089,6 @@ crypto::hash BlockchainBDB::get_block_hash_from_height(const uint64_t& height) c else if (get_result) throw0(DB_ERROR("Error attempting to retrieve a block hash from the db.")); - txn.commit(); return result; } @@ -1194,19 +1159,14 @@ bool BlockchainBDB::tx_exists(const crypto::hash& h) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(h); TIME_MEASURE_START(time1); - auto get_result = m_txs->exists(txn, &key, 0); + auto get_result = m_txs->exists(DB_DEFAULT_TX, &key, 0); TIME_MEASURE_FINISH(time1); time_tx_exists += time1; if (get_result == DB_NOTFOUND) { - txn.commit(); LOG_PRINT_L1("transaction with hash " << epee::string_tools::pod_to_hex(h) << " not found in db"); return false; } @@ -1218,16 +1178,12 @@ bool BlockchainBDB::tx_exists(const crypto::hash& h) const uint64_t BlockchainBDB::get_tx_unlock_time(const crypto::hash& h) const { - LOG_PRINT_L3("BlockchainLMDB::" << __func__); + LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(h); Dbt_copy result; - auto get_result = m_tx_unlocks->get(txn, &key, &result, 0); + auto get_result = m_tx_unlocks->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) throw1(TX_DNE(std::string("tx unlock time with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found in db").c_str())); else if (get_result) @@ -1241,13 +1197,9 @@ transaction BlockchainBDB::get_tx(const crypto::hash& h) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(h); Dbt_safe result; - auto get_result = m_txs->get(txn, &key, &result, 0); + auto get_result = m_txs->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) throw1(TX_DNE(std::string("tx with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found in db").c_str())); else if (get_result) @@ -1260,8 +1212,6 @@ transaction BlockchainBDB::get_tx(const crypto::hash& h) const if (!parse_and_validate_tx_from_blob(bd, tx)) throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db")); - txn.commit(); - return tx; } @@ -1270,21 +1220,15 @@ uint64_t BlockchainBDB::get_tx_count() const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - DB_BTREE_STAT* stats; // DB_FAST_STAT can apparently cause an incorrect number of records // to be returned. The flag should be set to 0 instead if this proves // to be the case. 
- m_txs->stat(txn, &stats, DB_FAST_STAT); + m_txs->stat(DB_DEFAULT_TX, &stats, DB_FAST_STAT); auto num_txs = stats->bt_nkeys; delete stats; - txn.commit(); - return num_txs; } @@ -1307,13 +1251,9 @@ uint64_t BlockchainBDB::get_tx_block_height(const crypto::hash& h) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy key(h); Dbt_copy result; - auto get_result = m_tx_heights->get(txn, &key, &result, 0); + auto get_result = m_tx_heights->get(DB_DEFAULT_TX, &key, &result, 0); if (get_result == DB_NOTFOUND) { throw1(TX_DNE(std::string("tx height with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found in db").c_str())); @@ -1321,8 +1261,6 @@ uint64_t BlockchainBDB::get_tx_block_height(const crypto::hash& h) const else if (get_result) throw0(DB_ERROR("DB error attempting to fetch tx height from hash")); - txn.commit(); - return (uint64_t)result - 1; } @@ -1344,14 +1282,10 @@ uint64_t BlockchainBDB::get_num_outputs(const uint64_t& amount) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - - bdb_cur cur(txn, m_output_amounts); + bdb_cur cur(DB_DEFAULT_TX, m_output_amounts); Dbt_copy k(amount); - Dbt_copy v; + Dbt_copy v; auto result = cur->get(&k, &v, DB_SET); if (result == DB_NOTFOUND) { @@ -1363,25 +1297,19 @@ uint64_t BlockchainBDB::get_num_outputs(const uint64_t& amount) const db_recno_t num_elems = 0; cur->count(&num_elems, 0); - txn.commit(); + cur.close(); return num_elems; } -crypto::public_key BlockchainBDB::get_output_key(const uint64_t& amount, const uint64_t& index) const +output_data_t BlockchainBDB::get_output_key(const uint64_t& global_index) const { LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - uint64_t glob_index = get_output_global_index(amount, index); - - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - - Dbt_copy k(glob_index); - Dbt_copy v; - auto get_result = m_output_keys->get(txn, &k, &v, 0); + Dbt_copy k(global_index); + Dbt_copy v; + auto get_result = m_output_keys->get(DB_DEFAULT_TX, &k, &v, 0); if (get_result == DB_NOTFOUND) throw0(DB_ERROR("Attempting to get output pubkey by global index, but key does not exist")); else if (get_result) @@ -1390,6 +1318,15 @@ crypto::public_key BlockchainBDB::get_output_key(const uint64_t& amount, const u return v; } +output_data_t BlockchainBDB::get_output_key(const uint64_t& amount, const uint64_t& index) +{ + LOG_PRINT_L3("BlockchainBDB::" << __func__); + check_open(); + + uint64_t glob_index = get_output_global_index(amount, index); + return get_output_key(glob_index); +} + // As this is not used, its return is now a blank output. // This will save on space in the db. 
tx_out BlockchainBDB::get_output(const crypto::hash& h, const uint64_t& index) const @@ -1406,230 +1343,17 @@ tx_out BlockchainBDB::get_output(const uint64_t& index) const return tx_out(); } -void BlockchainBDB::get_output_tx_and_index(const uint64_t& amount, - std::vector &offsets, - std::vector &indices) const +tx_out_index BlockchainBDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) { LOG_PRINT_L3("BlockchainBDB::" << __func__); - check_open(); - - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - - bdb_cur cur(txn, m_output_amounts); - uint64_t max = 0; - for(const uint64_t& index : offsets) - { - if(index > max) - max = index; - } - - // ??? might be a bug, don't always treat as uint64_t - #define DBT_VALUE(dbt) v.get_size() == sizeof(uint64_t) ? \ - *((uint64_t *)v.get_data()) : *((uint32_t *)v.get_data()) \ - - // get returned keypairs count - #define DB_COUNT_RECORDS(dbt, cnt) \ - do { \ - uint32_t *_p = (uint32_t *) ((uint8_t *)(dbt)->data + \ - (dbt)->ulen - sizeof(uint32_t)); \ - cnt = 0; \ - while(*_p != (uint32_t) -1) { \ - _p -= 2; \ - ++cnt; \ - } \ - } while(0); \ - - Dbt_copy k(amount); - Dbt_copy v; - uint64_t buflen = 0; - uint64_t t_dbmul = 0; - uint64_t t_dbscan = 0; - TIME_MEASURE_START(db2); - if(max <= 1) - { - for (const uint64_t& index : offsets) - { - TIME_MEASURE_START(t_seek); - - auto result = cur->get(&k, &v, DB_SET); - if (result == DB_NOTFOUND) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR("DB error attempting to get an output")); - - db_recno_t num_elems = 0; - cur->count(&num_elems, 0); - - if (num_elems <= index) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); - - for (uint64_t i = 0; i < index; ++i) - { - cur->get(&k, &v, DB_NEXT_DUP); - } - - uint64_t glob_index = DBT_VALUE(v); - - LOG_PRINT_L1("L0->v: " << glob_index); - tx_out_index res = get_output_tx_and_index_from_global(glob_index); - indices.push_back(res); - - TIME_MEASURE_FINISH(t_seek); - } - } - else - { - // setup a 256KB minimum buffer size - uint32_t pagesize = 256 * 1024; - - // Retrieve only a suitable portion of the kvp data, up to somewhere near - // the maximum offset value being retrieved - buflen = (max + 1) * 4 * sizeof(uint64_t); - buflen = ((buflen / pagesize) + ((buflen % pagesize) > 0 ? 1 : 0)) * pagesize; - bool singlebuff = buflen <= BUFFER_LENGTH; - buflen = buflen < BUFFER_LENGTH ? buflen : BUFFER_LENGTH; - - Dbt data; - data.set_data(m_buffer); - data.set_ulen(buflen); - data.set_size(buflen); - data.set_flags(DB_DBT_USERMEM); - - uint32_t curcount = 0; - uint32_t blockstart = 0; - for (const uint64_t& index : offsets) - { - // fixme! for whatever reason, the first call to DB_MULTIPLE | DB_SET does not - // retrieve the first value. 
- if(index <= 1) - { - auto result = cur->get(&k, &v, DB_SET); - if (result == DB_NOTFOUND) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR("DB error attempting to get an output")); - - db_recno_t num_elems = 0; - cur->count(&num_elems, 0); - - if (num_elems <= index) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); - if(index > 0) - cur->get(&k, &v, DB_NEXT_DUP); - } - else - { - while(index >= curcount) - { - TIME_MEASURE_START(t_db1); - try - { - cur->get(&k, &data, DB_MULTIPLE | (curcount == 0 ? DB_SET : DB_NEXT_DUP)); - blockstart = curcount; - - int count = 0; - // fixme! this might be slow on some systems. - DB_COUNT_RECORDS((DBT *) &data, count); - curcount += count; - } - catch (const std::exception &e) - { - LOG_PRINT_L0("DB_EXCEPTION: " << e.what()); - } - - TIME_MEASURE_FINISH(t_db1); - t_dbmul += t_db1; - if(singlebuff) - break; - } - - LOG_PRINT_L1("Records returned: " << curcount << " Index: " << index); - TIME_MEASURE_START(t_db2); - DBT *pdata = (DBT *) &data; - - uint8_t *value; - uint64_t dlen = 0; - - void *pbase = ((uint8_t *)(pdata->data)) + pdata->ulen - sizeof(uint32_t); - uint32_t *p = (uint32_t *) pbase; - if (*p == (uint32_t) -1) - { - value = NULL; - } - else - { - p -= (index - blockstart) * 2; // index * 4 + 2; <- if DB_MULTIPLE_KEY - value = (uint8_t *) pdata->data + *p--; - dlen = *p--; - if (value == (uint8_t *) pdata->data) - value = NULL; - } - - if (value != NULL) - { - v = dlen == sizeof(uint64_t) ? *((uint64_t *) value) - : *((uint32_t *) value); - } - TIME_MEASURE_FINISH(t_db2); - t_dbscan += t_db2; - } - - uint64_t glob_index = DBT_VALUE(v); - - LOG_PRINT_L1("L1->v: " << glob_index); - tx_out_index res = get_output_tx_and_index_from_global(glob_index); - indices.push_back(res); - } - } - - TIME_MEASURE_FINISH(db2); - - LOG_PRINT_L1("blen: " << buflen << " db1: " << t_dbmul << " db2: " << t_dbscan); - - cur.close(); - txn.commit(); -} - -tx_out_index BlockchainBDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const -{ - LOG_PRINT_L3("BlockchainBDB::" << __func__); - check_open(); - - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - - bdb_cur cur(txn, m_output_amounts); - - Dbt_copy k(amount); - Dbt_copy v; - - auto result = cur->get(&k, &v, DB_SET); - if (result == DB_NOTFOUND) + std::vector < uint64_t > offsets; + std::vector indices; + offsets.push_back(index); + get_output_tx_and_index(amount, offsets, indices); + if (!indices.size()) throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR("DB error attempting to get an output")); - db_recno_t num_elems = 0; - cur->count(&num_elems, 0); - - if (num_elems <= index) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); - - for (uint64_t i = 0; i < index; ++i) - { - cur->get(&k, &v, DB_NEXT_DUP); - } - - uint64_t glob_index = v; - - cur.close(); - - txn.commit(); - - return get_output_tx_and_index_from_global(glob_index); + return indices[0]; } std::vector BlockchainBDB::get_tx_output_indices(const crypto::hash& h) const @@ -1638,14 +1362,10 @@ std::vector BlockchainBDB::get_tx_output_indices(const crypto::hash& h check_open(); std::vector index_vec; - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - 
throw0(DB_ERROR("Failed to create a transaction for the db")); - - bdb_cur cur(txn, m_tx_outputs); + bdb_cur cur(DB_DEFAULT_TX, m_tx_outputs); Dbt_copy k(h); - Dbt_copy v; + Dbt_copy v; auto result = cur->get(&k, &v, DB_SET); if (result == DB_NOTFOUND) throw1(OUTPUT_DNE("Attempting to get an output by tx hash and tx index, but output not found")); @@ -1662,7 +1382,6 @@ std::vector BlockchainBDB::get_tx_output_indices(const crypto::hash& h } cur.close(); - txn.commit(); return index_vec; } @@ -1680,10 +1399,6 @@ std::vector BlockchainBDB::get_tx_amount_output_indices(const crypto:: transaction tx = get_tx(h); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - uint64_t i = 0; uint64_t global_index; for (const auto& vout : tx.vout) @@ -1692,10 +1407,10 @@ std::vector BlockchainBDB::get_tx_amount_output_indices(const crypto:: global_index = index_vec[i]; - bdb_cur cur(txn, m_output_amounts); + bdb_cur cur(DB_DEFAULT_TX, m_output_amounts); Dbt_copy k(amount); - Dbt_copy v; + Dbt_copy v; auto result = cur->get(&k, &v, DB_SET); if (result == DB_NOTFOUND) @@ -1728,7 +1443,6 @@ std::vector BlockchainBDB::get_tx_amount_output_indices(const crypto:: { // not found cur.close(); - txn.commit(); throw1(OUTPUT_DNE("specified output not found in db")); } @@ -1736,8 +1450,6 @@ std::vector BlockchainBDB::get_tx_amount_output_indices(const crypto:: ++i; } - txn.commit(); - return index_vec2; } @@ -1747,14 +1459,10 @@ tx_out_index BlockchainBDB::get_output_tx_and_index_from_global(const uint64_t& LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy k(index); Dbt_copy v; - auto get_result = m_output_txs->get(txn, &k, &v, 0); + auto get_result = m_output_txs->get(DB_DEFAULT_TX, &k, &v, 0); if (get_result == DB_NOTFOUND) throw1(OUTPUT_DNE("output with given index not in db")); else if (get_result) @@ -1763,14 +1471,12 @@ tx_out_index BlockchainBDB::get_output_tx_and_index_from_global(const uint64_t& crypto::hash tx_hash = v; Dbt_copy result; - get_result = m_output_indices->get(txn, &k, &result, 0); + get_result = m_output_indices->get(DB_DEFAULT_TX, &k, &result, 0); if (get_result == DB_NOTFOUND) throw1(OUTPUT_DNE("output with given index not in db")); else if (get_result) throw0(DB_ERROR("DB error attempting to fetch output tx index")); - txn.commit(); - return tx_out_index(tx_hash, result); } @@ -1779,18 +1485,12 @@ bool BlockchainBDB::has_key_image(const crypto::key_image& img) const LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); - bdb_txn_safe txn; - if (m_env->txn_begin(NULL, txn, 0)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - Dbt_copy val_key(img); - if (m_spent_keys->exists(txn, &val_key, 0) == 0) + if (m_spent_keys->exists(DB_DEFAULT_TX, &val_key, 0) == 0) { - txn.commit(); return true; } - txn.commit(); return false; } @@ -1819,17 +1519,12 @@ void BlockchainBDB::batch_abort() void BlockchainBDB::set_batch_transactions(bool batch_transactions) { - LOG_PRINT_L3("BlockchainLMDB::" << __func__); + LOG_PRINT_L3("BlockchainBDB::" << __func__); m_batch_transactions = batch_transactions; LOG_PRINT_L3("batch transactions " << (m_batch_transactions ? 
"enabled" : "disabled")); } -uint64_t BlockchainBDB::add_block( const block& blk - , const size_t& block_size - , const difficulty_type& cumulative_difficulty - , const uint64_t& coins_generated - , const std::vector& txs - ) +uint64_t BlockchainBDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, const std::vector& txs) { LOG_PRINT_L3("BlockchainBDB::" << __func__); check_open(); @@ -1888,4 +1583,295 @@ void BlockchainBDB::pop_block(block& blk, std::vector& txs) --m_height; } +void BlockchainBDB::get_output_tx_and_index_from_global(const std::vector &global_indices, std::vector &tx_out_indices) const +{ + LOG_PRINT_L3("BlockchainBDB::" << __func__); + check_open(); + tx_out_indices.clear(); + + for (const uint64_t &index : global_indices) + { + Dbt_copy k(index); + Dbt_copy v; + + auto get_result = m_output_txs->get(DB_DEFAULT_TX, &k, &v, 0); + if (get_result == DB_NOTFOUND) + throw1(OUTPUT_DNE("output with given index not in db")); + else if (get_result) + throw0(DB_ERROR("DB error attempting to fetch output tx hash")); + + crypto::hash tx_hash = v; + + Dbt_copy result; + get_result = m_output_indices->get(DB_DEFAULT_TX, &k, &result, 0); + if (get_result == DB_NOTFOUND) + throw1(OUTPUT_DNE("output with given index not in db")); + else if (get_result) + throw0(DB_ERROR("DB error attempting to fetch output tx index")); + auto hashindex = tx_out_index(tx_hash, result); + tx_out_indices.push_back(hashindex); + } +} + +void BlockchainBDB::get_output_global_indices(const uint64_t& amount, const std::vector &offsets, std::vector &global_indices) +{ + LOG_PRINT_L3("BlockchainBDB::" << __func__); + TIME_MEASURE_START(txx); + check_open(); + + bdb_cur cur(DB_DEFAULT_TX, m_output_amounts); + uint64_t max = 0; + for (const uint64_t& index : offsets) + { + if (index > max) + max = index; + } + + // get returned keypairs count +#define DB_COUNT_RECORDS(dbt, cnt) \ + do { \ + uint32_t *_p = (uint32_t *) ((uint8_t *)(dbt)->data + \ + (dbt)->ulen - sizeof(uint32_t)); \ + cnt = 0; \ + while(*_p != (uint32_t) -1) { \ + _p -= 2; \ + ++cnt; \ + } \ + } while(0); \ + + Dbt_copy k(amount); + Dbt_copy v; + uint64_t buflen = 0; + uint64_t t_dbmul = 0; + uint64_t t_dbscan = 0; + + auto result = cur->get(&k, &v, DB_SET); + if (result == DB_NOTFOUND) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); + else if (result) + throw0(DB_ERROR("DB error attempting to get an output")); + + db_recno_t num_elems = 0; + cur->count(&num_elems, 0); + + if (max <= 1 && num_elems <= max) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); + + TIME_MEASURE_START(db2); + if (max <= 1) + { + for (const uint64_t& index : offsets) + { + TIME_MEASURE_START(t_seek); + + auto result = cur->get(&k, &v, DB_SET); + if (result == DB_NOTFOUND) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); + else if (result) + throw0(DB_ERROR("DB error attempting to get an output")); + + for (uint64_t i = 0; i < index; ++i) + cur->get(&k, &v, DB_NEXT_DUP); + + uint64_t glob_index = v; + + LOG_PRINT_L3("L0->v: " << glob_index); + global_indices.push_back(glob_index); + + TIME_MEASURE_FINISH(t_seek); + } + } + else + { + // setup a 256KB minimum buffer size + uint32_t pagesize = 256 * 1024; + + // Retrieve only a suitable portion of the kvp data, up to somewhere near + // the maximum offset value being 
retrieved + buflen = (max + 1) * 4 * sizeof(uint64_t); + buflen = ((buflen / pagesize) + ((buflen % pagesize) > 0 ? 1 : 0)) * pagesize; + + bool nomem = false; + Dbt data; + + bool singlebuff = buflen <= m_buffer.get_buffer_size(); + buflen = buflen < m_buffer.get_buffer_size() ? buflen : m_buffer.get_buffer_size(); + bdb_safe_buffer_t::type buffer = nullptr; + bdb_safe_buffer_autolock lock(m_buffer, buffer); + + data.set_data(buffer); + data.set_ulen(buflen); + data.set_size(buflen); + data.set_flags(DB_DBT_USERMEM); + + uint32_t curcount = 0; + uint32_t blockstart = 0; + for (const uint64_t& index : offsets) + { + if (index >= num_elems) + { + LOG_PRINT_L1("Index: " << index << " Elems: " << num_elems << " partial results found for get_output_tx_and_index"); + break; + } + + // fixme! for whatever reason, the first call to DB_MULTIPLE | DB_SET does not + // retrieve the first value. + if (index <= 1 || nomem) + { + auto result = cur->get(&k, &v, DB_SET); + if (result == DB_NOTFOUND) + { + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); + } + else if (result) + { + throw0(DB_ERROR("DB error attempting to get an output")); + } + + for (uint64_t i = 0; i < index; ++i) + cur->get(&k, &v, DB_NEXT_DUP); + } + else + { + while (index >= curcount) + { + TIME_MEASURE_START(t_db1); + try + { + cur->get(&k, &data, DB_MULTIPLE | (curcount == 0 ? DB_SET : DB_NEXT_DUP)); + blockstart = curcount; + + int count = 0; + DB_COUNT_RECORDS((DBT * ) &data, count); + curcount += count; + } + catch (const std::exception &e) + { + cur.close(); + throw0(DB_ERROR(std::string("Failed on DB_MULTIPLE: ").append(e.what()).c_str())); + } + + TIME_MEASURE_FINISH(t_db1); + t_dbmul += t_db1; + if (singlebuff) + break; + } + + LOG_PRINT_L3("Records returned: " << curcount << " Index: " << index); + TIME_MEASURE_START(t_db2); + DBT *pdata = (DBT *) &data; + + uint8_t *value; + uint64_t dlen = 0; + + void *pbase = ((uint8_t *) (pdata->data)) + pdata->ulen - sizeof(uint32_t); + uint32_t *p = (uint32_t *) pbase; + if (*p == (uint32_t) -1) + { + value = NULL; + } + else + { + p -= (index - blockstart) * 2; // index * 4 + 2; <- if DB_MULTIPLE_KEY + value = (uint8_t *) pdata->data + *p--; + dlen = *p--; + if (value == (uint8_t *) pdata->data) + value = NULL; + } + + if (value != NULL) + { + v = dlen == sizeof(uint64_t) ? 
*((uint64_t *) value) : *((uint32_t *) value); + } + TIME_MEASURE_FINISH(t_db2); + t_dbscan += t_db2; + } + + uint64_t glob_index = v; + + LOG_PRINT_L3("L1->v: " << glob_index); + global_indices.push_back(glob_index); + } + } + TIME_MEASURE_FINISH(db2); + + cur.close(); + + TIME_MEASURE_FINISH(txx); + + LOG_PRINT_L3("blen: " << buflen << " txx: " << txx << " db1: " << t_dbmul << " db2: " << t_dbscan); + +} + +void BlockchainBDB::get_output_key(const uint64_t &amount, const std::vector &offsets, std::vector &outputs) +{ + LOG_PRINT_L3("BlockchainBDB::" << __func__); + check_open(); + TIME_MEASURE_START(txx); + outputs.clear(); + + std::vector < uint64_t > global_indices; + get_output_global_indices(amount, offsets, global_indices); + + TIME_MEASURE_START(db3); + if (global_indices.size() > 0) + { + for (const uint64_t &index : global_indices) + { + Dbt_copy k(index); + Dbt_copy v; + + auto get_result = m_output_keys->get(DB_DEFAULT_TX, &k, &v, 0); + if (get_result == DB_NOTFOUND) + throw1(OUTPUT_DNE("output with given index not in db")); + else if (get_result) + throw0(DB_ERROR("DB error attempting to fetch output tx hash")); + + output_data_t data = *(output_data_t *) v.get_data(); + outputs.push_back(data); + } + } + + TIME_MEASURE_FINISH(txx); + LOG_PRINT_L3("db3: " << db3); +} + +void BlockchainBDB::get_output_tx_and_index(const uint64_t& amount, const std::vector &offsets, std::vector &indices) +{ + LOG_PRINT_L3("BlockchainBDB::" << __func__); + check_open(); + + std::vector < uint64_t > global_indices; + get_output_global_indices(amount, offsets, global_indices); + + TIME_MEASURE_START(db3); + if (global_indices.size() > 0) + get_output_tx_and_index_from_global(global_indices, indices); + TIME_MEASURE_FINISH(db3); + + LOG_PRINT_L3("db3: " << db3); +} + +void BlockchainBDB::checkpoint_worker() const +{ + LOG_PRINT_L0("Entering BDB checkpoint thread.") + int count = 0; + while(m_run_checkpoint && m_open) + { + // sleep every second, so we don't delay exit condition m_run_checkpoint = false + sleep(1); + // checkpoint every 5 minutes + if(count++ >= 300) + { + count = 0; + if(m_env->txn_checkpoint(0, 0, 0) != 0) + { + LOG_PRINT_L0("BDB txn_checkpoint failed.") + break; + } + } + } + LOG_PRINT_L0("Leaving BDB checkpoint thread.") +} + } // namespace cryptonote diff --git a/src/blockchain_db/berkeleydb/db_bdb.h b/src/blockchain_db/berkeleydb/db_bdb.h index fec4ed24..41f4bcb7 100644 --- a/src/blockchain_db/berkeleydb/db_bdb.h +++ b/src/blockchain_db/berkeleydb/db_bdb.h @@ -30,6 +30,11 @@ #include "blockchain_db/blockchain_db.h" #include "cryptonote_protocol/blobdatatype.h" // for type blobdata +#include + +// ND: Enables multi-threaded bulk reads for when getting indices. +// TODO: Disabled for now, as it doesn't seem to provide noticeable improvements (??. Reason: TBD. +// #define BDB_BULK_CAN_THREAD namespace cryptonote { @@ -83,10 +88,145 @@ struct bdb_txn_safe { return &m_txn; } - +private: DbTxn* m_txn; }; +// ND: Class to handle buffer management when doing bulk queries +// (DB_MULTIPLE). Allocates buffers then handles thread queuing +// so a fixed set of buffers can be used (instead of allocating +// every time a bulk query is needed). 
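// Usage sketch (inferred from this patch, not code it adds; the void * element type is
// an assumption matching how db_bdb.cpp uses the pool): a caller borrows one of the
// pre-allocated buffers through the RAII autolock defined below, points a DB_DBT_USERMEM
// Dbt at it for a DB_MULTIPLE cursor read, and the buffer returns to the pool when the
// lock leaves scope -- mirroring get_output_global_indices() above.
//
//   bdb_safe_buffer<void *> pool(DB_BUFFER_COUNT, DB_BUFFER_LENGTH);
//   void *buf = nullptr;
//   {
//     bdb_safe_buffer_autolock<bdb_safe_buffer<void *>> lock(pool, buf); // waits for a free slot
//     Dbt data;
//     data.set_data(buf);
//     data.set_ulen(pool.get_buffer_size());
//     data.set_flags(DB_DBT_USERMEM);
//     // cur->get(&key, &data, DB_MULTIPLE | DB_SET); ...
//   } // ~bdb_safe_buffer_autolock releases the buffer back to the pool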
+template +class bdb_safe_buffer +{ + // limit the number of buffers to 8 + const size_t MaxAllowedBuffers = 8; +public: + bdb_safe_buffer(size_t num_buffers, size_t count) + { + if(num_buffers > MaxAllowedBuffers) + num_buffers = MaxAllowedBuffers; + + set_count(num_buffers); + for (size_t i = 0; i < num_buffers; i++) + m_buffers.push_back((T) malloc(sizeof(T) * count)); + m_buffer_count = count; + } + + ~bdb_safe_buffer() + { + for (size_t i = 0; i < m_buffers.size(); i++) + { + if (m_buffers[i]) + { + free(m_buffers[i]); + m_buffers[i] = nullptr; + } + } + + m_buffers.resize(0); + } + + T acquire_buffer() + { + std::unique_lock lock(m_lock); + m_cv.wait(lock, [&]{ return m_count > 0; }); + + --m_count; + size_t index = -1; + for (size_t i = 0; i < m_open_slot.size(); i++) + { + if (m_open_slot[i]) + { + m_open_slot[i] = false; + index = i; + break; + } + } + + assert(index >= 0); + + T buffer = m_buffers[index]; + m_buffer_map.emplace(buffer, index); + return buffer; + } + + void release_buffer(T buffer) + { + std::unique_lock lock(m_lock); + + assert(buffer != nullptr); + auto it = m_buffer_map.find(buffer); + if (it != m_buffer_map.end()) + { + auto index = it->second; + + assert(index < m_open_slot.size()); + assert(m_open_slot[index] == false); + assert(m_count < m_open_slot.size()); + + ++m_count; + m_open_slot[index] = true; + m_buffer_map.erase(it); + m_cv.notify_one(); + } + } + + size_t get_buffer_size() const + { + return m_buffer_count * sizeof(T); + } + + size_t get_buffer_count() const + { + return m_buffer_count; + } + + typedef T type; + +private: + void set_count(size_t count) + { + assert(count > 0); + m_open_slot.resize(count, true); + m_count = count; + } + + std::vector m_buffers; + std::unordered_map m_buffer_map; + + std::condition_variable m_cv; + std::vector m_open_slot; + size_t m_count; + std::mutex m_lock; + + size_t m_buffer_count; +}; + +template +class bdb_safe_buffer_autolock +{ +public: + bdb_safe_buffer_autolock(T &safe_buffer, typename T::type &buffer) : + m_safe_buffer(safe_buffer), m_buffer(nullptr) + { + m_buffer = m_safe_buffer.acquire_buffer(); + buffer = m_buffer; + } + + ~bdb_safe_buffer_autolock() + { + if (m_buffer != nullptr) + { + m_safe_buffer.release_buffer(m_buffer); + m_buffer = nullptr; + } + } +private: + T &m_safe_buffer; + typename T::type m_buffer; +}; + class BlockchainBDB : public BlockchainDB { public: @@ -159,8 +299,9 @@ public: virtual uint64_t get_num_outputs(const uint64_t& amount) const; - virtual crypto::public_key get_output_key(const uint64_t& amount, const uint64_t& index) const; - + virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index); + virtual output_data_t get_output_key(const uint64_t& global_index) const; + virtual void get_output_key(const uint64_t &amount, const std::vector &offsets, std::vector &outputs); virtual tx_out get_output(const crypto::hash& h, const uint64_t& index) const; /** @@ -175,9 +316,11 @@ public: tx_out get_output(const uint64_t& index) const; virtual tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const; + virtual void get_output_tx_and_index_from_global(const std::vector &global_indices, + std::vector &tx_out_indices) const; - virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const; - virtual void get_output_tx_and_index(const uint64_t& amount, std::vector &offsets, std::vector &indices) const; + virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index); + virtual 
void get_output_tx_and_index(const uint64_t& amount, const std::vector &offsets, std::vector &indices); virtual std::vector get_tx_output_indices(const crypto::hash& h) const; virtual std::vector get_tx_amount_output_indices(const crypto::hash& h) const; @@ -198,7 +341,12 @@ public: virtual void batch_abort(); virtual void pop_block(block& blk, std::vector& txs); - virtual bool has_bulk_indices() const { return true; } + +#if defined(BDB_BULK_CAN_THREAD) + virtual bool can_thread_bulk_indices() const { return true; } +#else + virtual bool can_thread_bulk_indices() const { return false; } +#endif private: virtual void add_block( const block& blk @@ -214,7 +362,7 @@ private: virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx); - virtual void add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index); + virtual void add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time); virtual void remove_output(const tx_out& tx_output); @@ -227,6 +375,7 @@ private: virtual void remove_spent_key(const crypto::key_image& k_image); + void get_output_global_indices(const uint64_t& amount, const std::vector &offsets, std::vector &global_indices); /** * @brief convert a tx output to a blob for storage * @@ -253,10 +402,13 @@ private: * * @return the global index of the desired output */ - uint64_t get_output_global_index(const uint64_t& amount, const uint64_t& index) const; - + uint64_t get_output_global_index(const uint64_t& amount, const uint64_t& index); + void checkpoint_worker() const; void check_open() const; - void *m_buffer; + bool m_run_checkpoint; + std::unique_ptr m_checkpoint_thread; + typedef bdb_safe_buffer bdb_safe_buffer_t; + bdb_safe_buffer_t m_buffer; DbEnv* m_env; diff --git a/src/blockchain_db/blockchain_db.cpp b/src/blockchain_db/blockchain_db.cpp index bfe93948..f07f3008 100644 --- a/src/blockchain_db/blockchain_db.cpp +++ b/src/blockchain_db/blockchain_db.cpp @@ -64,7 +64,7 @@ void BlockchainDB::add_transaction(const crypto::hash& blk_hash, const transacti { for (uint64_t i = 0; i < tx.vout.size(); ++i) { - add_output(tx_hash, tx.vout[i], i); + add_output(tx_hash, tx.vout[i], i, tx.unlock_time); } for (const txin_v& tx_input : tx.vin) diff --git a/src/blockchain_db/blockchain_db.h b/src/blockchain_db/blockchain_db.h index 7b826f76..ff15109b 100644 --- a/src/blockchain_db/blockchain_db.h +++ b/src/blockchain_db/blockchain_db.h @@ -138,6 +138,15 @@ namespace cryptonote // typedef for convenience typedef std::pair tx_out_index; +#pragma pack(push, 1) +struct output_data_t +{ + crypto::public_key pubkey; + uint64_t unlock_time; + uint64_t height; +}; +#pragma pack(pop) + /*********************************** * Exception Definitions ***********************************/ @@ -279,7 +288,7 @@ private: virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx) = 0; // tells the subclass to store an output - virtual void add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index) = 0; + virtual void add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time) = 0; // tells the subclass to remove an output virtual void remove_output(const tx_out& tx_output) = 0; @@ -313,7 +322,7 @@ protected: mutable uint64_t time_tx_exists = 0; uint64_t time_commit1 = 0; - + bool m_auto_remove_logs = true; public: @@ -461,7 +470,8 @@ public: virtual uint64_t 
get_num_outputs(const uint64_t& amount) const = 0; // return public key for output with global output amount and index - virtual crypto::public_key get_output_key(const uint64_t& amount, const uint64_t& index) const = 0; + virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index) = 0; + virtual output_data_t get_output_key(const uint64_t& global_index) const = 0; // returns the output indexed by in the transaction with hash virtual tx_out get_output(const crypto::hash& h, const uint64_t& index) const = 0; @@ -471,9 +481,11 @@ public: // returns the transaction-local reference for the output with at // return type is pair of tx hash and index - virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const = 0; - virtual void get_output_tx_and_index(const uint64_t& amount, std::vector &offsets, std::vector &indices) const = 0; - virtual bool has_bulk_indices() const = 0; + virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) = 0; + virtual void get_output_tx_and_index(const uint64_t& amount, const std::vector &offsets, std::vector &indices) = 0; + virtual void get_output_key(const uint64_t &amount, const std::vector &offsets, std::vector &outputs) = 0; + + virtual bool can_thread_bulk_indices() const = 0; // return a vector of indices corresponding to the global output index for // each output in the transaction with hash @@ -485,7 +497,10 @@ public: // returns true if key image is present in spent key images storage virtual bool has_key_image(const crypto::key_image& img) const = 0; + void set_auto_remove_logs(bool auto_remove) { m_auto_remove_logs = auto_remove; } + bool m_open; + mutable epee::critical_section m_synchronization_lock; }; // class BlockchainDB diff --git a/src/blockchain_db/lmdb/db_lmdb.cpp b/src/blockchain_db/lmdb/db_lmdb.cpp index 0d0b8ba8..22f7dcd4 100644 --- a/src/blockchain_db/lmdb/db_lmdb.cpp +++ b/src/blockchain_db/lmdb/db_lmdb.cpp @@ -65,10 +65,19 @@ struct lmdb_cur done = false; } - ~lmdb_cur() { close(); } + ~lmdb_cur() + { + close(); + } - operator MDB_cursor*() { return m_cur; } - operator MDB_cursor**() { return &m_cur; } + operator MDB_cursor*() + { + return m_cur; + } + operator MDB_cursor**() + { + return &m_cur; + } void close() { @@ -87,7 +96,8 @@ private: template struct MDB_val_copy: public MDB_val { - MDB_val_copy(const T &t): t_copy(t) + MDB_val_copy(const T &t) : + t_copy(t) { mv_size = sizeof (T); mv_data = &t_copy; @@ -99,7 +109,8 @@ private: template<> struct MDB_val_copy: public MDB_val { - MDB_val_copy(const cryptonote::blobdata &bd): data(new char[bd.size()]) + MDB_val_copy(const cryptonote::blobdata &bd) : + data(new char[bd.size()]) { memcpy(data.get(), bd.data(), bd.size()); mv_size = bd.size(); @@ -109,7 +120,8 @@ private: std::unique_ptr data; }; -auto compare_uint64 = [](const MDB_val *a, const MDB_val *b) { +auto compare_uint64 = [](const MDB_val *a, const MDB_val *b) +{ const uint64_t va = *(const uint64_t*)a->mv_data; const uint64_t vb = *(const uint64_t*)b->mv_data; if (va < vb) return -1; @@ -117,6 +129,20 @@ auto compare_uint64 = [](const MDB_val *a, const MDB_val *b) { else return 1; }; +int compare_hash32(const MDB_val *a, const MDB_val *b) +{ + uint32_t *va = (uint32_t*) a->mv_data; + uint32_t *vb = (uint32_t*) b->mv_data; + for (int n = 7; n >= 0; n--) + { + if (va[n] == vb[n]) + continue; + return va[n] < vb[n] ? 
-1 : 1; + } + + return 0; +} + const char* const LMDB_BLOCKS = "blocks"; const char* const LMDB_BLOCK_TIMESTAMPS = "block_timestamps"; const char* const LMDB_BLOCK_HEIGHTS = "block_heights"; @@ -235,6 +261,26 @@ void mdb_txn_safe::allow_new_txns() void BlockchainLMDB::do_resize(uint64_t increase_size) { + CRITICAL_REGION_LOCAL(m_synchronization_lock); + const uint64_t add_size = 1LL << 30; + + // check disk capacity + try + { + boost::filesystem::path path(m_folder); + boost::filesystem::space_info si = boost::filesystem::space(path); + if(si.available < add_size) + { + LOG_PRINT_RED_L0("!! WARNING: Insufficient free space to extend database !!: " << si.available / 1LL << 20L); + return; + } + } + catch(...) + { + // print something but proceed. + LOG_PRINT_YELLOW("Unable to query free disk space.", LOG_LEVEL_0); + } + MDB_envinfo mei; mdb_env_info(m_env, &mei); @@ -250,6 +296,9 @@ void BlockchainLMDB::do_resize(uint64_t increase_size) if (increase_size > 0) new_mapsize = mei.me_mapsize + increase_size; + // add 1Gb per resize, instead of doing a percentage increase + // uint64_t new_mapsize = (double) mei.me_mapsize + add_size; + new_mapsize += (new_mapsize % mst.ms_psize); mdb_txn_safe::prevent_new_txns(); @@ -270,9 +319,7 @@ void BlockchainLMDB::do_resize(uint64_t increase_size) mdb_env_set_mapsize(m_env, new_mapsize); - LOG_PRINT_L0("LMDB Mapsize increased." - << " Old: " << mei.me_mapsize / (1024 * 1024) << "MiB" - << ", New: " << new_mapsize / (1024 * 1024) << "MiB"); + LOG_PRINT_GREEN("LMDB Mapsize increased." << " Old: " << mei.me_mapsize / (1024 * 1024) << "MiB" << ", New: " << new_mapsize / (1024 * 1024) << "MiB", LOG_LEVEL_0); mdb_txn_safe::allow_new_txns(); } @@ -280,6 +327,7 @@ void BlockchainLMDB::do_resize(uint64_t increase_size) // threshold_size is used for batch transactions bool BlockchainLMDB::need_resize(uint64_t threshold_size) const { +#if defined(ENABLE_AUTO_RESIZE) MDB_envinfo mei; mdb_env_info(m_env, &mei); @@ -311,12 +359,19 @@ bool BlockchainLMDB::need_resize(uint64_t threshold_size) const return false; } - if ((double)size_used / mei.me_mapsize > RESIZE_PERCENT) + std::mt19937 engine(std::random_device{}()); + std::uniform_real_distribution fdis(0.6, 0.9); + double resize_percent = fdis(engine); + + if ((double)size_used / mei.me_mapsize > resize_percent) { LOG_PRINT_L1("Threshold met (percent-based)"); return true; } return false; +#else + return false; +#endif } void BlockchainLMDB::check_and_resize_for_batch(uint64_t batch_num_blocks) @@ -389,12 +444,8 @@ uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks) con return threshold_size; } -void BlockchainLMDB::add_block( const block& blk - , const size_t& block_size - , const difficulty_type& cumulative_difficulty - , const uint64_t& coins_generated - , const crypto::hash& blk_hash - ) +void BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, + const crypto::hash& blk_hash) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -545,7 +596,7 @@ void BlockchainLMDB::remove_transaction_data(const crypto::hash& tx_hash, const } -void BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index) +void BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -574,10 +625,15 @@ void 
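One nit in the new free-space check in do_resize(): inside the stream expression, si.available / 1LL << 20L groups as (stream << si.available / 1LL) << 20L, so the warning prints the raw byte count followed by a literal 20 rather than a MiB figure. A possible fix (sketch):

    // Report free space in MiB; the shift must be parenthesised or named so it
    // is not treated as another stream insertion.
    const uint64_t MiB = 1ULL << 20;
    LOG_PRINT_RED_L0("!! WARNING: Insufficient free space to extend database !!: "
        << si.available / MiB << " MiB available");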
BlockchainLMDB::add_output(const crypto::hash& tx_hash, const tx_out& tx_ou if (tx_output.target.type() == typeid(txout_to_key)) { - MDB_val_copy val_pubkey(boost::get(tx_output.target).key); - result = mdb_put(*m_write_txn, m_output_keys, &k, &val_pubkey, 0); - if (result) - throw0(DB_ERROR(std::string("Failed to add output pubkey to db transaction: ").append(mdb_strerror(result)).c_str())); + output_data_t od; + od.pubkey = boost::get < txout_to_key > (tx_output.target).key; + od.unlock_time = unlock_time; + od.height = m_height; + + MDB_val_copy data(od); + //MDB_val_copy val_pubkey(boost::get(tx_output.target).key); + if (mdb_put(*m_write_txn, m_output_keys, &k, &data, 0)) + throw0(DB_ERROR("Failed to add output pubkey to db transaction")); } @@ -808,47 +864,17 @@ tx_out BlockchainLMDB::output_from_blob(const blobdata& blob) const return o; } -uint64_t BlockchainLMDB::get_output_global_index(const uint64_t& amount, const uint64_t& index) const +uint64_t BlockchainLMDB::get_output_global_index(const uint64_t& amount, const uint64_t& index) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); - check_open(); - - mdb_txn_safe txn; - if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - - lmdb_cur cur(txn, m_output_amounts); - - MDB_val_copy k(amount); - MDB_val v; - - auto result = mdb_cursor_get(cur, &k, &v, MDB_SET); - if (result == MDB_NOTFOUND) + std::vector offsets; + std::vector global_indices; + offsets.push_back(index); + get_output_global_indices(amount, offsets, global_indices); + if (!global_indices.size()) throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR("DB error attempting to get an output")); - size_t num_elems = 0; - mdb_cursor_count(cur, &num_elems); - if (num_elems <= index) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); - - mdb_cursor_get(cur, &k, &v, MDB_FIRST_DUP); - - for (uint64_t i = 0; i < index; ++i) - { - mdb_cursor_get(cur, &k, &v, MDB_NEXT_DUP); - } - - mdb_cursor_get(cur, &k, &v, MDB_GET_CURRENT); - - uint64_t glob_index = *(const uint64_t*)v.mv_data; - - cur.close(); - - txn.commit(); - - return glob_index; + return global_indices[0]; } void BlockchainLMDB::check_open() const @@ -903,8 +929,7 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags) // check for existing LMDB files in base directory boost::filesystem::path old_files = direc.parent_path(); - if (boost::filesystem::exists(old_files / "data.mdb") || - boost::filesystem::exists(old_files / "lock.mdb")) + if (boost::filesystem::exists(old_files / "data.mdb") || boost::filesystem::exists(old_files / "lock.mdb")) { LOG_PRINT_L0("Found existing LMDB files in " << old_files.string()); LOG_PRINT_L0("Move data.mdb and/or lock.mdb to " << filename << ", or delete them, and then restart"); @@ -970,7 +995,7 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags) lmdb_db_open(txn, LMDB_OUTPUT_TXS, MDB_INTEGERKEY | MDB_CREATE, m_output_txs, "Failed to open db handle for m_output_txs"); lmdb_db_open(txn, LMDB_OUTPUT_INDICES, MDB_INTEGERKEY | MDB_CREATE, m_output_indices, "Failed to open db handle for m_output_indices"); - lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts"); + lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | 
MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts"); lmdb_db_open(txn, LMDB_OUTPUT_KEYS, MDB_INTEGERKEY | MDB_CREATE, m_output_keys, "Failed to open db handle for m_output_keys"); /*************** not used, but kept for posterity @@ -982,6 +1007,11 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags) mdb_set_dupsort(txn, m_output_amounts, compare_uint64); mdb_set_dupsort(txn, m_tx_outputs, compare_uint64); + mdb_set_compare(txn, m_spent_keys, compare_hash32); + mdb_set_compare(txn, m_block_heights, compare_hash32); + mdb_set_compare(txn, m_txs, compare_hash32); + mdb_set_compare(txn, m_tx_unlocks, compare_hash32); + mdb_set_compare(txn, m_tx_heights, compare_hash32); // get and keep current height MDB_stat db_stats; @@ -995,6 +1025,34 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags) throw0(DB_ERROR("Failed to query m_output_indices")); m_num_outputs = db_stats.ms_entries; + // ND: This "new" version of the lmdb database is incompatible with + // the previous version. Ensure that the output_keys database is + // sizeof(output_data_t) in length. Otherwise, inform user and + // terminate. + if(m_height > 0) + { + MDB_val_copy k(0); + MDB_val v; + auto get_result = mdb_get(txn, m_output_keys, &k, &v); + if(get_result != MDB_SUCCESS) + { + txn.abort(); + m_open = false; + return; + } + + // LOG_PRINT_L0("Output keys size: " << v.mv_size); + if(v.mv_size != sizeof(output_data_t)) + { + txn.abort(); + mdb_env_close(m_env); + m_open = false; + LOG_PRINT_RED_L0("Existing lmdb database is incompatible with this version."); + LOG_PRINT_RED_L0("Please delete the existing database and resync."); + return; + } + } + // commit the transaction txn.commit(); @@ -1604,26 +1662,33 @@ uint64_t BlockchainLMDB::get_num_outputs(const uint64_t& amount) const return num_elems; } -crypto::public_key BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index) const +output_data_t BlockchainLMDB::get_output_key(const uint64_t &global_index) const { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); - uint64_t glob_index = get_output_global_index(amount, index); - mdb_txn_safe txn; if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn)) throw0(DB_ERROR("Failed to create a transaction for the db")); - MDB_val_copy k(glob_index); + MDB_val_copy k(global_index); MDB_val v; auto get_result = mdb_get(txn, m_output_keys, &k, &v); if (get_result == MDB_NOTFOUND) throw0(DB_ERROR("Attempting to get output pubkey by global index, but key does not exist")); else if (get_result) throw0(DB_ERROR("Error attempting to retrieve an output pubkey from the db")); + txn.commit(); + return *(output_data_t *) v.mv_data; +} - return *(crypto::public_key*)v.mv_data; +output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + + uint64_t glob_index = get_output_global_index(amount, index); + return get_output_key(glob_index); } tx_out BlockchainLMDB::get_output(const crypto::hash& h, const uint64_t& index) const @@ -1731,53 +1796,17 @@ tx_out_index BlockchainLMDB::get_output_tx_and_index_from_global(const uint64_t& return tx_out_index(tx_hash, *(const uint64_t *)v.mv_data); } -tx_out_index BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const +tx_out_index BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); - check_open(); 
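The compatibility probe above works because every record in m_output_keys is a raw copy of output_data_t (defined with #pragma pack(1) in blockchain_db.h), so its on-disk size is fixed. A compile-time guard can make that dependency explicit; the 32-byte size of crypto::public_key is assumed here rather than taken from this diff:

    // output_data_t is memcpy'd in and out of the database, so any padding
    // change silently breaks existing data files; the mv_size probe above
    // detects that at runtime, this asserts it at compile time.
    static_assert(sizeof(output_data_t) ==
                  sizeof(crypto::public_key) + 2 * sizeof(uint64_t),
                  "output_data_t must stay tightly packed (48 bytes for a 32-byte public key)");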
- - mdb_txn_safe txn; - mdb_txn_safe* txn_ptr = &txn; - if (m_batch_active) - txn_ptr = m_write_txn; - else - { - if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn)) - throw0(DB_ERROR("Failed to create a transaction for the db")); - } - lmdb_cur cur(*txn_ptr, m_output_amounts); - - MDB_val_copy k(amount); - MDB_val v; - - auto result = mdb_cursor_get(cur, &k, &v, MDB_SET); - if (result == MDB_NOTFOUND) + std::vector < uint64_t > offsets; + std::vector indices; + offsets.push_back(index); + get_output_tx_and_index(amount, offsets, indices); + if (!indices.size()) throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); - else if (result) - throw0(DB_ERROR("DB error attempting to get an output")); - size_t num_elems = 0; - mdb_cursor_count(cur, &num_elems); - if (num_elems <= index) - throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); - - mdb_cursor_get(cur, &k, &v, MDB_FIRST_DUP); - - for (uint64_t i = 0; i < index; ++i) - { - mdb_cursor_get(cur, &k, &v, MDB_NEXT_DUP); - } - - mdb_cursor_get(cur, &k, &v, MDB_GET_CURRENT); - - uint64_t glob_index = *(const uint64_t*)v.mv_data; - - cur.close(); - - if (! m_batch_active) - txn.commit(); - - return get_output_tx_and_index_from_global(glob_index); + return indices[0]; } std::vector BlockchainLMDB::get_tx_output_indices(const crypto::hash& h) const @@ -2016,12 +2045,8 @@ void BlockchainLMDB::set_batch_transactions(bool batch_transactions) LOG_PRINT_L3("batch transactions " << (m_batch_transactions ? "enabled" : "disabled")); } -uint64_t BlockchainLMDB::add_block( const block& blk - , const size_t& block_size - , const difficulty_type& cumulative_difficulty - , const uint64_t& coins_generated - , const std::vector& txs - ) +uint64_t BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, + const std::vector& txs) { LOG_PRINT_L3("BlockchainLMDB::" << __func__); check_open(); @@ -2103,4 +2128,213 @@ void BlockchainLMDB::pop_block(block& blk, std::vector& txs) --m_height; } +void BlockchainLMDB::get_output_tx_and_index_from_global(const std::vector &global_indices, + std::vector &tx_out_indices) const +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + tx_out_indices.clear(); + + mdb_txn_safe txn; + mdb_txn_safe* txn_ptr = &txn; + if (m_batch_active) + txn_ptr = m_write_txn; + else + { + if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn)) + throw0(DB_ERROR("Failed to create a transaction for the db")); + } + + for (const uint64_t &index : global_indices) + { + MDB_val_copy k(index); + MDB_val v; + + auto get_result = mdb_get(*txn_ptr, m_output_txs, &k, &v); + if (get_result == MDB_NOTFOUND) + throw1(OUTPUT_DNE("output with given index not in db")); + else if (get_result) + throw0(DB_ERROR("DB error attempting to fetch output tx hash")); + + crypto::hash tx_hash = *(crypto::hash*) v.mv_data; + + get_result = mdb_get(*txn_ptr, m_output_indices, &k, &v); + if (get_result == MDB_NOTFOUND) + throw1(OUTPUT_DNE("output with given index not in db")); + else if (get_result) + throw0(DB_ERROR("DB error attempting to fetch output tx index")); + + auto result = tx_out_index(tx_hash, *(const uint64_t *) v.mv_data); + tx_out_indices.push_back(result); + } + + if (!m_batch_active) + txn.commit(); +} + +void BlockchainLMDB::get_output_global_indices(const uint64_t& amount, const std::vector &offsets, + std::vector &global_indices) +{ + 
LOG_PRINT_L3("BlockchainLMDB::" << __func__); + TIME_MEASURE_START(txx); + check_open(); + global_indices.clear(); + + uint64_t max = 0; + for (const uint64_t &index : offsets) + { + if (index > max) + max = index; + } + + mdb_txn_safe txn; + mdb_txn_safe* txn_ptr = &txn; + if(m_batch_active) + txn_ptr = m_write_txn; + else + { + if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn)) + throw0(DB_ERROR("Failed to create a transaction for the db")); + } + + lmdb_cur cur(*txn_ptr, m_output_amounts); + + MDB_val_copy k(amount); + MDB_val v; + auto result = mdb_cursor_get(cur, &k, &v, MDB_SET); + if (result == MDB_NOTFOUND) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found")); + else if (result) + throw0(DB_ERROR("DB error attempting to get an output")); + + size_t num_elems = 0; + mdb_cursor_count(cur, &num_elems); + if (max <= 1 && num_elems <= max) + throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but output not found")); + + uint64_t t_dbmul = 0; + uint64_t t_dbscan = 0; + if (max <= 1) + { + for (const uint64_t& index : offsets) + { + mdb_cursor_get(cur, &k, &v, MDB_FIRST_DUP); + for (uint64_t i = 0; i < index; ++i) + { + mdb_cursor_get(cur, &k, &v, MDB_NEXT_DUP); + } + + mdb_cursor_get(cur, &k, &v, MDB_GET_CURRENT); + uint64_t glob_index = *(const uint64_t*) v.mv_data; + LOG_PRINT_L3("Amount: " << amount << " M0->v: " << glob_index); + global_indices.push_back(glob_index); + } + } + else + { + uint32_t curcount = 0; + uint32_t blockstart = 0; + for (const uint64_t& index : offsets) + { + if (index >= num_elems) + { + LOG_PRINT_L1("Index: " << index << " Elems: " << num_elems << " partial results found for get_output_tx_and_index"); + break; + } + while (index >= curcount) + { + TIME_MEASURE_START(db1); + if (mdb_cursor_get(cur, &k, &v, curcount == 0 ? 
MDB_GET_MULTIPLE : MDB_NEXT_MULTIPLE) != 0) + { + // allow partial results + result = false; + break; + } + + int count = v.mv_size / sizeof(uint64_t); + + blockstart = curcount; + curcount += count; + TIME_MEASURE_FINISH(db1); + t_dbmul += db1; + } + + LOG_PRINT_L3("Records returned: " << curcount << " Index: " << index); + TIME_MEASURE_START(db2); + uint64_t actual_index = index - blockstart; + uint64_t glob_index = ((const uint64_t*) v.mv_data)[actual_index]; + + LOG_PRINT_L3("Amount: " << amount << " M1->v: " << glob_index); + global_indices.push_back(glob_index); + + TIME_MEASURE_FINISH(db2); + t_dbscan += db2; + + } + } + + cur.close(); + if(!m_batch_active) + txn.commit(); + + TIME_MEASURE_FINISH(txx); + LOG_PRINT_L3("txx: " << txx << " db1: " << t_dbmul << " db2: " << t_dbscan); +} + +void BlockchainLMDB::get_output_key(const uint64_t &amount, const std::vector &offsets, std::vector &outputs) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + TIME_MEASURE_START(db3); + check_open(); + outputs.clear(); + + std::vector global_indices; + get_output_global_indices(amount, offsets, global_indices); + + if (global_indices.size() > 0) + { + mdb_txn_safe txn; + if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn)) + throw0(DB_ERROR("Failed to create a transaction for the db")); + + for (const uint64_t &index : global_indices) + { + MDB_val_copy k(index); + MDB_val v; + + auto get_result = mdb_get(txn, m_output_keys, &k, &v); + if (get_result != 0) + throw0(DB_ERROR("Attempting to get output pubkey by global index, but key does not exist")); + else if (get_result) + throw0(DB_ERROR("Error attempting to retrieve an output pubkey from the db")); + + output_data_t data = *(output_data_t *) v.mv_data; + outputs.push_back(data); + } + + txn.commit(); + } + + TIME_MEASURE_FINISH(db3); + LOG_PRINT_L3("db3: " << db3); +} + +void BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const std::vector &offsets, std::vector &indices) +{ + LOG_PRINT_L3("BlockchainLMDB::" << __func__); + check_open(); + indices.clear(); + + std::vector global_indices; + get_output_global_indices(amount, offsets, global_indices); + + TIME_MEASURE_START(db3); + if(global_indices.size() > 0) + { + get_output_tx_and_index_from_global(global_indices, indices); + } + TIME_MEASURE_FINISH(db3); + LOG_PRINT_L3("db3: " << db3); +} + } // namespace cryptonote diff --git a/src/blockchain_db/lmdb/db_lmdb.h b/src/blockchain_db/lmdb/db_lmdb.h index cbab431b..0facb871 100644 --- a/src/blockchain_db/lmdb/db_lmdb.h +++ b/src/blockchain_db/lmdb/db_lmdb.h @@ -33,6 +33,8 @@ #include +#define ENABLE_AUTO_RESIZE + namespace cryptonote { @@ -159,7 +161,9 @@ public: virtual uint64_t get_num_outputs(const uint64_t& amount) const; - virtual crypto::public_key get_output_key(const uint64_t& amount, const uint64_t& index) const; + virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index); + virtual output_data_t get_output_key(const uint64_t& global_index) const; + virtual void get_output_key(const uint64_t &amount, const std::vector &offsets, std::vector &outputs); virtual tx_out get_output(const crypto::hash& h, const uint64_t& index) const; @@ -175,12 +179,12 @@ public: tx_out get_output(const uint64_t& index) const; virtual tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const; + virtual void get_output_tx_and_index_from_global(const std::vector &global_indices, + std::vector &tx_out_indices) const; - virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& 
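Opening m_output_amounts with MDB_DUPFIXED is what makes the MDB_GET_MULTIPLE / MDB_NEXT_MULTIPLE path above possible: all duplicate values under one amount key have the same size, so each cursor call returns a whole page of them. The pattern in isolation (a sketch; cursor setup as in get_output_global_indices, the consumer is hypothetical):

    MDB_val k, v;
    // ... cursor `cur` already positioned on the amount key via MDB_SET ...
    MDB_cursor_op op = MDB_GET_MULTIPLE;
    while (mdb_cursor_get(cur, &k, &v, op) == 0)
    {
        // v.mv_data points at an array of fixed-size duplicates, not a single value
        const uint64_t *page = (const uint64_t *) v.mv_data;
        const size_t n = v.mv_size / sizeof(uint64_t);
        for (size_t i = 0; i < n; ++i)
            process_global_index(page[i]);   // hypothetical consumer
        op = MDB_NEXT_MULTIPLE;              // fetch the next page of duplicates
    }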
index) const; - virtual void get_output_tx_and_index(const uint64_t& amount, std::vector &offsets, std::vector &indices) const - { - // do nothing - }; + virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index); + virtual void get_output_tx_and_index(const uint64_t& amount, const std::vector &offsets, std::vector &indices); + virtual void get_output_global_indices(const uint64_t& amount, const std::vector &offsets, std::vector &indices); virtual std::vector get_tx_output_indices(const crypto::hash& h) const; virtual std::vector get_tx_amount_output_indices(const crypto::hash& h) const; @@ -202,7 +206,7 @@ public: virtual void pop_block(block& blk, std::vector& txs); - virtual bool has_bulk_indices() const { return false; } + virtual bool can_thread_bulk_indices() const { return true; } private: void do_resize(uint64_t size_increase=0); @@ -223,7 +227,7 @@ private: virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx); - virtual void add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index); + virtual void add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time); virtual void remove_output(const tx_out& tx_output); @@ -262,7 +266,7 @@ private: * * @return the global index of the desired output */ - uint64_t get_output_global_index(const uint64_t& amount, const uint64_t& index) const; + uint64_t get_output_global_index(const uint64_t& amount, const uint64_t& index); void check_open() const; @@ -299,9 +303,18 @@ private: bool m_batch_transactions; // support for batch transactions bool m_batch_active; // whether batch transaction is in progress - constexpr static uint64_t DEFAULT_MAPSIZE = 1 << 30; +#if defined(__arm__) + // force a value so it can compile with 32-bit ARM + constexpr static uint64_t DEFAULT_MAPSIZE = 1LL << 31; +#else +#if defined(ENABLE_AUTO_RESIZE) + constexpr static uint64_t DEFAULT_MAPSIZE = 1LL << 30; +#else + constexpr static uint64_t DEFAULT_MAPSIZE = 1LL << 33; +#endif +#endif + constexpr static float RESIZE_PERCENT = 0.8f; - constexpr static float RESIZE_FACTOR = 1.5f; }; } // namespace cryptonote diff --git a/src/blocks/CMakeLists.txt b/src/blocks/CMakeLists.txt new file mode 100644 index 00000000..0ad88538 --- /dev/null +++ b/src/blocks/CMakeLists.txt @@ -0,0 +1,37 @@ +# Copyright (c) 2014-2015, The Monero Project +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are +# permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of +# conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list +# of conditions and the following disclaimer in the documentation and/or other +# materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be +# used to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +if(APPLE) + add_library(blocks STATIC blockexports.c) + set_target_properties(blocks PROPERTIES LINKER_LANGUAGE C) +else() + add_custom_command(OUTPUT blocks.o COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ld -r -b binary -o ${CMAKE_CURRENT_BINARY_DIR}/blocks.o blocks.dat) + add_library(blocks STATIC blocks.o blockexports.c) + set_target_properties(blocks PROPERTIES LINKER_LANGUAGE C) +endif() + diff --git a/src/blocks/blockexports.c b/src/blocks/blockexports.c new file mode 100644 index 00000000..cea72b29 --- /dev/null +++ b/src/blocks/blockexports.c @@ -0,0 +1,48 @@ +#include + +#if defined(__APPLE__) +#include + +#if !defined(__LP64__) +extern const struct mach_header _mh_execute_header; +#else +extern const struct mach_header_64 _mh_execute_header; +#endif + +const unsigned char *get_blocks_dat_start() +{ + size_t size; + return getsectiondata(&_mh_execute_header, "__DATA", "__blocks_dat", &size); +} + +size_t get_blocks_dat_size() +{ + size_t size; + getsectiondata(&_mh_execute_header, "__DATA", "__blocks_dat", &size); + return size; +} + +#else + +#if defined(_WIN32) && !defined(_WIN64) +#define _binary_blocks_start binary_blocks_dat_start +#define _binary_blocks_end binary_blocks_dat_end +#else +#define _binary_blocks_start _binary_blocks_dat_start +#define _binary_blocks_end _binary_blocks_dat_end +#endif + +extern const unsigned char _binary_blocks_start[]; +extern const unsigned char _binary_blocks_end[]; + +const unsigned char *get_blocks_dat_start(void) +{ + return _binary_blocks_start; +} + +size_t get_blocks_dat_size(void) +{ + return (size_t) (_binary_blocks_end - _binary_blocks_start); +} + +#endif diff --git a/src/blocks/blocks.dat b/src/blocks/blocks.dat new file mode 100644 index 00000000..249956e7 Binary files /dev/null and b/src/blocks/blocks.dat differ diff --git a/src/blocks/blocks.h b/src/blocks/blocks.h new file mode 100644 index 00000000..76a08c89 --- /dev/null +++ b/src/blocks/blocks.h @@ -0,0 +1,16 @@ +#ifndef SRC_BLOCKS_BLOCKS_H_ +#define SRC_BLOCKS_BLOCKS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +const unsigned char *get_blocks_dat_start(); +size_t get_blocks_dat_size(); + +#ifdef __cplusplus +} +#endif + + +#endif /* SRC_BLOCKS_BLOCKS_H_ */ diff --git a/src/crypto/aesb.c b/src/crypto/aesb.c index ba48313d..128c523e 100644 --- a/src/crypto/aesb.c +++ b/src/crypto/aesb.c @@ -140,7 +140,15 @@ extern "C" d_4(uint32_t, t_dec(f,n), sb_data, u0, u1, u2, u3); -void aesb_single_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey) +#if !defined(STATIC) +#define STATIC +#endif + +#if !defined(INLINE) +#define INLINE +#endif + +STATIC INLINE void aesb_single_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey) { uint32_t b0[4], b1[4]; const uint32_t *kp = (uint32_t *) expandedKey; @@ -151,7 +159,7 @@ void aesb_single_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey) state_out(out, b1); } -void aesb_pseudo_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey) +STATIC INLINE void aesb_pseudo_round(const 
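On non-Apple platforms the custom command turns blocks.dat into an object file with ld -r -b binary, which synthesizes the _binary_blocks_dat_start/_end symbols; blockexports.c hides the platform differences (Mach-O section lookup on OS X, the leading-underscore quirk on 32-bit Windows) behind the two accessors in blocks.h. Callers only ever see those accessors; a small consumer sketch (the function name is illustrative):

    #include <cstdio>
    #include "blocks/blocks.h"

    void report_embedded_blocks()
    {
        const unsigned char *p = get_blocks_dat_start();
        const size_t len = get_blocks_dat_size();
        if (p == nullptr || len == 0)
            std::printf("no precomputed block hashes embedded\n");
        else
            std::printf("embedded blocks.dat: %zu bytes at %p\n", len, (const void *) p);
    }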
uint8_t *in, uint8_t *out, uint8_t *expandedKey) { uint32_t b0[4], b1[4]; const uint32_t *kp = (uint32_t *) expandedKey; diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c index 42573798..60699d4e 100644 --- a/src/crypto/slow-hash.c +++ b/src/crypto/slow-hash.c @@ -624,6 +624,196 @@ void cn_slow_hash(const void *data, size_t length, char *hash) extra_hashes[state.hs.b[0] & 3](&state, 200, hash); } +#elif defined(__arm__) +// ND: Some minor optimizations for ARM7 (raspberrry pi 2), effect seems to be ~40-50% faster. +// Needs more work. +void slow_hash_allocate_state(void) +{ + // Do nothing, this is just to maintain compatibility with the upgraded slow-hash.c + return; +} + +void slow_hash_free_state(void) +{ + // As above + return; +} + +static void (*const extra_hashes[4])(const void *, size_t, char *) = { + hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein +}; + +#define MEMORY (1 << 21) /* 2 MiB */ +#define ITER (1 << 20) +#define AES_BLOCK_SIZE 16 +#define AES_KEY_SIZE 32 /*16*/ +#define INIT_SIZE_BLK 8 +#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) + +#if defined(__GNUC__) +#define RDATA_ALIGN16 __attribute__ ((aligned(16))) +#define STATIC static +#define INLINE inline +#else +#define RDATA_ALIGN16 +#define STATIC static +#define INLINE +#endif + +#define U64(x) ((uint64_t *) (x)) + +#include "aesb.c" + +STATIC INLINE void ___mul128(uint32_t *a, uint32_t *b, uint32_t *h, uint32_t *l) +{ + // ND: 64x64 multiplication for ARM7 + __asm__ __volatile__ + ( + // lo hi + "umull %[r0], %[r1], %[b], %[d]\n\t" // bd [r0 = bd.lo] + "umull %[r2], %[r3], %[b], %[c]\n\t" // bc + "umull %[b], %[c], %[a], %[c]\n\t" // ac + "adds %[r1], %[r1], %[r2]\n\t" // r1 = bd.hi + bc.lo + "adcs %[r2], %[r3], %[b]\n\t" // r2 = ac.lo + bc.hi + carry + "adc %[r3], %[c], #0\n\t" // r3 = ac.hi + carry + "umull %[b], %[a], %[a], %[d]\n\t" // ad + "adds %[r1], %[r1], %[b]\n\t" // r1 = bd.hi + bc.lo + ad.lo + "adcs %[r2], %[r2], %[a]\n\t" // r2 = ac.lo + bc.hi + ad.hi + carry + "adc %[r3], %[r3], #0\n\t" // r3 = ac.hi + carry + : [r0]"=&r"(l[0]), [r1]"=&r"(l[1]), [r2]"=&r"(h[0]), [r3]"=&r"(h[1]) + : [a]"r"(a[1]), [b]"r"(a[0]), [c]"r"(b[1]), [d]"r"(b[0]) + : "cc" + ); +} + +STATIC INLINE void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) +{ + ___mul128((uint32_t *) a, (uint32_t *) b, (uint32_t *) (res + 0), (uint32_t *) (res + 8)); +} + +STATIC INLINE void sum_half_blocks(uint8_t* a, const uint8_t* b) +{ + uint64_t a0, a1, b0, b1; + a0 = U64(a)[0]; + a1 = U64(a)[1]; + b0 = U64(b)[0]; + b1 = U64(b)[1]; + a0 += b0; + a1 += b1; + U64(a)[0] = a0; + U64(a)[1] = a1; +} + +STATIC INLINE void swap_blocks(uint8_t *a, uint8_t *b) +{ + uint64_t t[2]; + U64(t)[0] = U64(a)[0]; + U64(t)[1] = U64(a)[1]; + U64(a)[0] = U64(b)[0]; + U64(a)[1] = U64(b)[1]; + U64(b)[0] = U64(t)[0]; + U64(b)[1] = U64(t)[1]; +} + +STATIC INLINE void xor_blocks(uint8_t* a, const uint8_t* b) +{ + U64(a)[0] ^= U64(b)[0]; + U64(a)[1] ^= U64(b)[1]; +} + +#pragma pack(push, 1) +union cn_slow_hash_state +{ + union hash_state hs; + struct + { + uint8_t k[64]; + uint8_t init[INIT_SIZE_BYTE]; + }; +}; +#pragma pack(pop) + +void cn_slow_hash(const void *data, size_t length, char *hash) +{ + uint8_t long_state[MEMORY]; + uint8_t text[INIT_SIZE_BYTE]; + uint8_t a[AES_BLOCK_SIZE]; + uint8_t b[AES_BLOCK_SIZE]; + uint8_t d[AES_BLOCK_SIZE]; + uint8_t aes_key[AES_KEY_SIZE]; + RDATA_ALIGN16 uint8_t expandedKey[256]; + + union cn_slow_hash_state state; + + size_t i, j; + uint8_t *p = NULL; + oaes_ctx *aes_ctx; + static void 
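The umull block above computes a full 64x64 to 128-bit product from 32-bit limbs; mul() stores the high half at res+0 and the low half at res+8. A portable reference for checking it on non-ARM hosts (a sketch, not part of the patch; inputs and outputs are {low, high} limb pairs, like the assembly's):

    #include <cstdint>

    static void mul128_ref(const uint32_t a[2], const uint32_t b[2],
                           uint32_t hi[2], uint32_t lo[2])
    {
        const uint64_t a0 = a[0], a1 = a[1], b0 = b[0], b1 = b[1];

        const uint64_t bd = a0 * b0;   // low  x low
        const uint64_t ad = a1 * b0;   // high x low
        const uint64_t bc = a0 * b1;   // low  x high
        const uint64_t ac = a1 * b1;   // high x high

        // middle column: high word of bd plus the low words of the cross terms
        const uint64_t mid  = (bd >> 32) + (ad & 0xffffffffu) + (bc & 0xffffffffu);
        const uint64_t low  = (bd & 0xffffffffu) | (mid << 32);
        const uint64_t high = ac + (ad >> 32) + (bc >> 32) + (mid >> 32);

        lo[0] = (uint32_t) low;   lo[1] = (uint32_t) (low >> 32);
        hi[0] = (uint32_t) high;  hi[1] = (uint32_t) (high >> 32);
    }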
(*const extra_hashes[4])(const void *, size_t, char *) = + { + hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein + }; + + hash_process(&state.hs, data, length); + memcpy(text, state.init, INIT_SIZE_BYTE); + + aes_ctx = (oaes_ctx *) oaes_alloc(); + oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE); + + // use aligned data + memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + for(j = 0; j < INIT_SIZE_BLK; j++) + aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey); + memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); + } + + U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0]; + U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1]; + U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0]; + U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1]; + + for(i = 0; i < ITER / 2; i++) + { + #define MASK ((uint32_t)(((MEMORY / AES_BLOCK_SIZE) - 1) << 4)) + #define state_index(x) ((*(uint32_t *) x) & MASK) + + // Iteration 1 + p = &long_state[state_index(a)]; + aesb_single_round(p, p, a); + + xor_blocks(b, p); + swap_blocks(b, p); + swap_blocks(a, b); + + // Iteration 2 + p = &long_state[state_index(a)]; + + mul(a, p, d); + sum_half_blocks(b, d); + swap_blocks(b, p); + xor_blocks(b, p); + swap_blocks(a, b); + } + + memcpy(text, state.init, INIT_SIZE_BYTE); + oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); + memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + for(j = 0; j < INIT_SIZE_BLK; j++) + { + xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); + aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey); + } + } + + oaes_free((OAES_CTX **) &aes_ctx); + memcpy(state.init, text, INIT_SIZE_BYTE); + hash_permutation(&state.hs); + extra_hashes[state.hs.b[0] & 3](&state, 200, hash); +} + #else // Portable implementation as a fallback diff --git a/src/cryptonote_core/CMakeLists.txt b/src/cryptonote_core/CMakeLists.txt index e144a93a..564dc321 100644 --- a/src/cryptonote_core/CMakeLists.txt +++ b/src/cryptonote_core/CMakeLists.txt @@ -62,6 +62,12 @@ set(cryptonote_core_private_headers tx_pool.h verification_context.h) +if(PER_BLOCK_CHECKPOINT) + set(Blocks "blocks") +else() + set(Blocks "") +endif() + bitmonero_private_headers(cryptonote_core ${crypto_private_headers}) bitmonero_add_library(cryptonote_core @@ -78,6 +84,7 @@ target_link_libraries(cryptonote_core ${Boost_PROGRAM_OPTIONS_LIBRARY} ${Boost_SERIALIZATION_LIBRARY} LINK_PRIVATE + ${Blocks} ${Boost_FILESYSTEM_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY} diff --git a/src/cryptonote_core/blockchain.cpp b/src/cryptonote_core/blockchain.cpp index cb442e05..31e517ff 100644 --- a/src/cryptonote_core/blockchain.cpp +++ b/src/cryptonote_core/blockchain.cpp @@ -50,6 +50,10 @@ #include "warnings.h" #include "crypto/hash.h" #include "cryptonote_core/checkpoints_create.h" +#if defined(PER_BLOCK_CHECKPOINT) +#include "blocks/blocks.h" +#endif + //#include "serialization/json_archive.h" /* TODO: @@ -60,286 +64,358 @@ using namespace cryptonote; using epee::string_tools::pod_to_hex; +extern "C" void slow_hash_allocate_state(); +extern "C" void slow_hash_free_state(); DISABLE_VS_WARNINGS(4267) //------------------------------------------------------------------ -Blockchain::Blockchain(tx_memory_pool& tx_pool):m_db(), m_tx_pool(tx_pool), 
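The inner loop addresses the 2 MiB scratchpad through state_index(), i.e. the first 32 bits of the block masked down to a 16-byte-aligned offset. A small self-check of that arithmetic (a sketch mirroring the #defines above):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    void check_scratchpad_mask()
    {
        const uint32_t memory = 1u << 21;                  // MEMORY
        const uint32_t mask   = ((memory / 16) - 1) << 4;  // MASK
        assert(mask == 0x1FFFF0u);
        for (uint32_t x : {0u, 0xDEADBEEFu, 0xFFFFFFFFu})
        {
            const uint32_t off = x & mask;
            assert(off % 16 == 0 && off + 16 <= memory);   // always a whole block inside long_state
        }
    }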
m_current_block_cumul_sz_limit(0), m_is_in_checkpoint_zone(false), m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false) +Blockchain::Blockchain(tx_memory_pool& tx_pool) : +m_db(), m_tx_pool(tx_pool), m_timestamps_and_difficulties_height(0), m_current_block_cumul_sz_limit(0), m_is_in_checkpoint_zone(false), +m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false), m_max_prepare_blocks_threads(4), m_db_blocks_per_sync(1), m_db_sync_mode(db_async), m_fast_sync(true) { - LOG_PRINT_L3("Blockchain::" << __func__); + LOG_PRINT_L3("Blockchain::" << __func__); } //------------------------------------------------------------------ //TODO: is this still needed? I don't think so - tewinget template void Blockchain::serialize(archive_t & ar, const unsigned int version) { - LOG_PRINT_L3("Blockchain::" << __func__); - if(version < 11) - return; - CRITICAL_REGION_LOCAL(m_blockchain_lock); - ar & m_blocks; - ar & m_blocks_index; - ar & m_transactions; - ar & m_spent_keys; - ar & m_alternative_chains; - ar & m_outputs; - ar & m_invalid_blocks; - ar & m_current_block_cumul_sz_limit; - /*serialization bug workaround*/ - if(version > 11) - { - uint64_t total_check_count = m_db->height() + m_blocks_index.size() + m_transactions.size() + m_spent_keys.size() + m_alternative_chains.size() + m_outputs.size() + m_invalid_blocks.size() + m_current_block_cumul_sz_limit; - if(archive_t::is_saving::value) - { - ar & total_check_count; - }else + LOG_PRINT_L3("Blockchain::" << __func__); + if(version < 11) + return; + CRITICAL_REGION_LOCAL(m_blockchain_lock); + ar & m_blocks; + ar & m_blocks_index; + ar & m_transactions; + ar & m_spent_keys; + ar & m_alternative_chains; + ar & m_outputs; + ar & m_invalid_blocks; + ar & m_current_block_cumul_sz_limit; + /*serialization bug workaround*/ + if(version > 11) { - uint64_t total_check_count_loaded = 0; - ar & total_check_count_loaded; - if(total_check_count != total_check_count_loaded) - { - LOG_ERROR("Blockchain storage data corruption detected. total_count loaded from file = " << total_check_count_loaded << ", expected = " << total_check_count); + uint64_t total_check_count = m_db->height() + m_blocks_index.size() + m_transactions.size() + m_spent_keys.size() + m_alternative_chains.size() + m_outputs.size() + m_invalid_blocks.size() + m_current_block_cumul_sz_limit; + if(archive_t::is_saving::value) + { + ar & total_check_count; + } + else + { + uint64_t total_check_count_loaded = 0; + ar & total_check_count_loaded; + if(total_check_count != total_check_count_loaded) + { + LOG_ERROR("Blockchain storage data corruption detected. 
total_count loaded from file = " << total_check_count_loaded << ", expected = " << total_check_count); - LOG_PRINT_L0("Blockchain storage:" << std::endl << - "m_blocks: " << m_db->height() << std::endl << - "m_blocks_index: " << m_blocks_index.size() << std::endl << - "m_transactions: " << m_transactions.size() << std::endl << - "m_spent_keys: " << m_spent_keys.size() << std::endl << - "m_alternative_chains: " << m_alternative_chains.size() << std::endl << - "m_outputs: " << m_outputs.size() << std::endl << - "m_invalid_blocks: " << m_invalid_blocks.size() << std::endl << - "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit); + LOG_PRINT_L0("Blockchain storage:" << std::endl << "m_blocks: " << m_db->height() << std::endl << "m_blocks_index: " << m_blocks_index.size() << std::endl << "m_transactions: " << m_transactions.size() << std::endl << "m_spent_keys: " << m_spent_keys.size() << std::endl << "m_alternative_chains: " << m_alternative_chains.size() << std::endl << "m_outputs: " << m_outputs.size() << std::endl << "m_invalid_blocks: " << m_invalid_blocks.size() << std::endl << "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit); - throw std::runtime_error("Blockchain data corruption"); - } + throw std::runtime_error("Blockchain data corruption"); + } + } } - } - - LOG_PRINT_L3("Blockchain storage:" << std::endl << - "m_blocks: " << m_db->height() << std::endl << - "m_blocks_index: " << m_blocks_index.size() << std::endl << - "m_transactions: " << m_transactions.size() << std::endl << - "m_spent_keys: " << m_spent_keys.size() << std::endl << - "m_alternative_chains: " << m_alternative_chains.size() << std::endl << - "m_outputs: " << m_outputs.size() << std::endl << - "m_invalid_blocks: " << m_invalid_blocks.size() << std::endl << - "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit); + LOG_PRINT_L3("Blockchain storage:" << std::endl << "m_blocks: " << m_db->height() << std::endl << "m_blocks_index: " << m_blocks_index.size() << std::endl << "m_transactions: " << m_transactions.size() << std::endl << "m_spent_keys: " << m_spent_keys.size() << std::endl << "m_alternative_chains: " << m_alternative_chains.size() << std::endl << "m_outputs: " << m_outputs.size() << std::endl << "m_invalid_blocks: " << m_invalid_blocks.size() << std::endl << "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit); } //------------------------------------------------------------------ bool Blockchain::have_tx(const crypto::hash &id) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - return m_db->tx_exists(id); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + return m_db->tx_exists(id); } //------------------------------------------------------------------ bool Blockchain::have_tx_keyimg_as_spent(const crypto::key_image &key_im) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - return m_db->has_key_image(key_im); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + return m_db->has_key_image(key_im); } //------------------------------------------------------------------ // This function makes sure that each "input" in an input (mixins) exists // and collects the public key for each from the transaction it was included in // via the visitor passed to it. 
-template -bool Blockchain::scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t& vis, uint64_t* pmax_related_block_height) const +template +bool Blockchain::scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t &vis, const crypto::hash &tx_prefix_hash, uint64_t* pmax_related_block_height) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); - // verify that the input has key offsets (that it exists properly, really) - if(!tx_in_to_key.key_offsets.size()) - return false; + // ND: Disable locking and make method private. + //CRITICAL_REGION_LOCAL(m_blockchain_lock); - // cryptonote_format_utils uses relative offsets for indexing to the global - // outputs list. that is to say that absolute offset #2 is absolute offset - // #1 plus relative offset #2. - // TODO: Investigate if this is necessary / why this is done. - std::vector absolute_offsets = relative_output_offsets_to_absolute(tx_in_to_key.key_offsets); - std::vector indices; + // verify that the input has key offsets (that it exists properly, really) + if(!tx_in_to_key.key_offsets.size()) + return false; - // if(typeid(*m_db) == typeid(BlockchainBDB)) - if(m_db->has_bulk_indices()) - { - m_db->get_output_tx_and_index(tx_in_to_key.amount, absolute_offsets, indices); - } + // cryptonote_format_utils uses relative offsets for indexing to the global + // outputs list. that is to say that absolute offset #2 is absolute offset + // #1 plus relative offset #2. + // TODO: Investigate if this is necessary / why this is done. + std::vector absolute_offsets = relative_output_offsets_to_absolute(tx_in_to_key.key_offsets); + std::vector outputs; - //std::vector >& amount_outs_vec = it->second; - size_t count = 0; - for (const uint64_t& i : absolute_offsets) - { - try + bool found = false; + auto it = m_scan_table.find(tx_prefix_hash); + if (it != m_scan_table.end()) { - // get tx hash and output index for output - tx_out_index output_index; - - if(indices.size() == absolute_offsets.size()) - output_index = indices.at(count); - else - output_index = m_db->get_output_tx_and_index(tx_in_to_key.amount, i); - - // get tx that output is from - auto tx = m_db->get_tx(output_index.first); - - // make sure output index is within range for the given transaction - if (output_index.second >= tx.vout.size()) - { - LOG_PRINT_L0("Output does not exist. 
tx = " << output_index.first << ", index = " << output_index.second); - return false; - } - - // call to the passed boost visitor to grab the public key for the output - if(!vis.handle_output(tx, tx.vout[output_index.second])) - { - LOG_PRINT_L0("Failed to handle_output for output no = " << count << ", with absolute offset " << i); - return false; - } - - // if on last output and pmax_related_block_height not null pointer - if(++count == absolute_offsets.size() && pmax_related_block_height) - { - // set *pmax_related_block_height to tx block height for this output - auto h = m_db->get_tx_block_height(output_index.first); - if(*pmax_related_block_height < h) + auto its = it->second.find(tx_in_to_key.k_image); + if (its != it->second.end()) { - *pmax_related_block_height = h; + outputs = its->second; + found = true; } - } - } - catch (const OUTPUT_DNE& e) + + if (!found) { - LOG_PRINT_L0("Output does not exist: " << e.what()); - return false; + m_db->get_output_key(tx_in_to_key.amount, absolute_offsets, outputs); } - catch (const TX_DNE& e) + else { - LOG_PRINT_L0("Transaction does not exist: " << e.what()); - return false; + // check for partial results and add the rest if needed; + if (outputs.size() < absolute_offsets.size() && outputs.size() > 0) + { + LOG_PRINT_L1("Additional outputs needed: " << absolute_offsets.size() - outputs.size()); + std::vector < uint64_t > add_offsets; + std::vector add_outputs; + for (size_t i = outputs.size(); i < absolute_offsets.size(); i++) + add_offsets.push_back(absolute_offsets[i]); + m_db->get_output_key(tx_in_to_key.amount, add_offsets, add_outputs); + outputs.insert(outputs.end(), add_outputs.begin(), add_outputs.end()); + } } - } + size_t count = 0; + for (const uint64_t& i : absolute_offsets) + { + try + { + output_data_t output_index; + try + { + // get tx hash and output index for output + if (count < outputs.size()) + output_index = outputs.at(count); + else + output_index = m_db->get_output_key(tx_in_to_key.amount, i); - return true; + // call to the passed boost visitor to grab the public key for the output + if (!vis.handle_output(output_index.unlock_time, output_index.pubkey)) + { + LOG_PRINT_L0("Failed to handle_output for output no = " << count << ", with absolute offset " << i); + return false; + } + } + catch (...) + { + LOG_PRINT_L0("Output does not exist! 
amount = " << tx_in_to_key.amount << ", absolute_offset = " << i); + return false; + } + + // if on last output and pmax_related_block_height not null pointer + if(++count == absolute_offsets.size() && pmax_related_block_height) + { + // set *pmax_related_block_height to tx block height for this output + auto h = output_index.height; + if(*pmax_related_block_height < h) + { + *pmax_related_block_height = h; + } + } + + } + catch (const OUTPUT_DNE& e) + { + LOG_PRINT_L0("Output does not exist: " << e.what()); + return false; + } + catch (const TX_DNE& e) + { + LOG_PRINT_L0("Transaction does not exist: " << e.what()); + return false; + } + + } + + return true; } //------------------------------------------------------------------ uint64_t Blockchain::get_current_blockchain_height() const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - return m_db->height(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + return m_db->height(); } //------------------------------------------------------------------ //FIXME: possibly move this into the constructor, to avoid accidentally // dereferencing a null BlockchainDB pointer bool Blockchain::init(BlockchainDB* db, const bool testnet) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - if (db == nullptr) - { - LOG_ERROR("Attempted to init Blockchain with null DB"); - return false; - } - if (!db->is_open()) - { - LOG_ERROR("Attempted to init Blockchain with unopened DB"); - return false; - } - - m_db = db; - - // if the blockchain is new, add the genesis block - // this feels kinda kludgy to do it this way, but can be looked at later. - // TODO: add function to create and store genesis block, - // taking testnet into account - if(!m_db->height()) - { - LOG_PRINT_L0("Blockchain not loaded, generating genesis block."); - block bl = boost::value_initialized(); - block_verification_context bvc = boost::value_initialized(); - if (testnet) + if (db == nullptr) { - generate_genesis_block(bl, config::testnet::GENESIS_TX, config::testnet::GENESIS_NONCE); + LOG_ERROR("Attempted to init Blockchain with null DB"); + return false; } + if (!db->is_open()) + { + LOG_ERROR("Attempted to init Blockchain with unopened DB"); + return false; + } + + m_db = db; + + // if the blockchain is new, add the genesis block + // this feels kinda kludgy to do it this way, but can be looked at later. + // TODO: add function to create and store genesis block, + // taking testnet into account + if(!m_db->height()) + { + LOG_PRINT_L0("Blockchain not loaded, generating genesis block."); + block bl = boost::value_initialized(); + block_verification_context bvc = boost::value_initialized(); + if (testnet) + { + generate_genesis_block(bl, config::testnet::GENESIS_TX, config::testnet::GENESIS_NONCE); + } + else + { + generate_genesis_block(bl, config::GENESIS_TX, config::GENESIS_NONCE); + } + add_new_block(bl, bvc); + CHECK_AND_ASSERT_MES(!bvc.m_verifivation_failed, false, "Failed to add genesis block to blockchain"); + } + // TODO: if blockchain load successful, verify blockchain against both + // hard-coded and runtime-loaded (and enforced) checkpoints. 
else { - generate_genesis_block(bl, config::GENESIS_TX, config::GENESIS_NONCE); } - add_new_block(bl, bvc); - CHECK_AND_ASSERT_MES(!bvc.m_verifivation_failed, false, "Failed to add genesis block to blockchain"); - } - // TODO: if blockchain load successful, verify blockchain against both - // hard-coded and runtime-loaded (and enforced) checkpoints. - else - { - } - // check how far behind we are - uint64_t top_block_timestamp = m_db->get_top_block_timestamp(); - uint64_t timestamp_diff = time(NULL) - top_block_timestamp; + // check how far behind we are + uint64_t top_block_timestamp = m_db->get_top_block_timestamp(); + uint64_t timestamp_diff = time(NULL) - top_block_timestamp; - // genesis block has no timestamp, could probably change it to have timestamp of 1341378000... - if(!top_block_timestamp) - timestamp_diff = time(NULL) - 1341378000; - LOG_PRINT_GREEN("Blockchain initialized. last block: " << m_db->height() - 1 << ", " << epee::misc_utils::get_time_interval_string(timestamp_diff) << " time ago, current difficulty: " << get_difficulty_for_next_block(), LOG_LEVEL_0); + // genesis block has no timestamp, could probably change it to have timestamp of 1341378000... + if(!top_block_timestamp) + timestamp_diff = time(NULL) - 1341378000; - return true; + // create general purpose async service queue + + m_async_work_idle = std::unique_ptr < boost::asio::io_service::work > (new boost::asio::io_service::work(m_async_service)); + // we only need 1 + m_async_pool.create_thread(boost::bind(&boost::asio::io_service::run, &m_async_service)); + +#if defined(PER_BLOCK_CHECKPOINT) + if (m_fast_sync && get_blocks_dat_start() != nullptr) + { + if (get_blocks_dat_size() > 4) + { + const unsigned char *p = get_blocks_dat_start(); + uint32_t nblocks = *(uint32_t *) p; + if(nblocks > 0 && nblocks > m_db->height()) + { + LOG_PRINT_L0("Loading precomputed blocks: " << nblocks); + p += sizeof(uint32_t); + for (uint32_t i = 0; i < nblocks; i++) + { + crypto::hash hash; + memcpy(hash.data, p, sizeof(hash.data)); + p += sizeof(hash.data); + m_blocks_hash_check.push_back(hash); + } + + // FIXME: clear tx_pool because the process might have been + // terminated and caused it to store txs kept by blocks. + // The core will not call check_tx_inputs(..) for these + // transactions in this case. Consequently, the sanity check + // for tx hashes will fail in handle_block_to_main_chain(..) + std::list txs; + m_tx_pool.get_transactions(txs); + + size_t blob_size; + uint64_t fee; + transaction pool_tx; + for(const transaction &tx : txs) + { + crypto::hash tx_hash = get_transaction_hash(tx); + m_tx_pool.take_tx(tx_hash, pool_tx, blob_size, fee); + } + } + } + } +#endif + + LOG_PRINT_GREEN("Blockchain initialized. last block: " << m_db->height() - 1 << ", " << epee::misc_utils::get_time_interval_string(timestamp_diff) << " time ago, current difficulty: " << get_difficulty_for_next_block(), LOG_LEVEL_0); + + return true; } //------------------------------------------------------------------ bool Blockchain::store_blockchain() { - LOG_PRINT_L3("Blockchain::" << __func__); - // TODO: make sure if this throws that it is not simply ignored higher - // up the call stack - try - { - m_db->sync(); - } - catch (const std::exception& e) - { - LOG_PRINT_L0(std::string("Error syncing blockchain db: ") + e.what() + "-- shutting down now to prevent issues!"); - throw; - } - catch (...) 
- { - LOG_PRINT_L0("There was an issue storing the blockchain, shutting down now to prevent issues!"); - throw; - } - LOG_PRINT_L0("Blockchain stored OK."); - return true; + LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3); + // lock because the rpc_thread command handler also calls this + CRITICAL_REGION_LOCAL(m_db->m_synchronization_lock); + + TIME_MEASURE_START(save); + // TODO: make sure sync(if this throws that it is not simply ignored higher + // up the call stack + try + { + m_db->sync(); + } + catch (const std::exception& e) + { + LOG_PRINT_L0(std::string("Error syncing blockchain db: ") + e.what() + "-- shutting down now to prevent issues!"); + throw; + } + catch (...) + { + LOG_PRINT_L0("There was an issue storing the blockchain, shutting down now to prevent issues!"); + throw; + } + + TIME_MEASURE_FINISH(save); + if(m_show_time_stats) + LOG_PRINT_L0("Blockchain stored OK, took: " << save << " ms"); + return true; } //------------------------------------------------------------------ bool Blockchain::deinit() { - LOG_PRINT_L3("Blockchain::" << __func__); - // as this should be called if handling a SIGSEGV, need to check - // if m_db is a NULL pointer (and thus may have caused the illegal - // memory operation), otherwise we may cause a loop. - if (m_db == NULL) - { - throw new DB_ERROR("The db pointer is null in Blockchain, the blockchain may be corrupt!"); - } + LOG_PRINT_L3("Blockchain::" << __func__); - try - { - m_db->close(); - } - catch (const std::exception& e) - { - LOG_PRINT_L0(std::string("Error closing blockchain db: ") + e.what()); - } - catch (...) - { - LOG_PRINT_L0("There was an issue closing/storing the blockchain, shutting down now to prevent issues!"); - } + LOG_PRINT_L0("Closing IO Service.") + // stop async service + m_async_work_idle.reset(); + m_async_pool.join_all(); + m_async_service.stop(); - delete m_db; - return true; + // as this should be called if handling a SIGSEGV, need to check + // if m_db is a NULL pointer (and thus may have caused the illegal + // memory operation), otherwise we may cause a loop. + if (m_db == NULL) + { + throw new DB_ERROR("The db pointer is null in Blockchain, the blockchain may be corrupt!"); + } + + try + { + m_db->close(); + } + catch (const std::exception& e) + { + LOG_PRINT_L0(std::string("Error closing blockchain db: ") + e.what()); + } + catch (...) + { + LOG_PRINT_L0("There was an issue closing/storing the blockchain, shutting down now to prevent issues!"); + } + + delete m_db; + return true; } //------------------------------------------------------------------ // This function tells BlockchainDB to remove the top block from the @@ -347,123 +423,129 @@ bool Blockchain::deinit() // from it to the tx_pool block Blockchain::pop_block_from_blockchain() { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - block popped_block; - std::vector popped_txs; + m_timestamps_and_difficulties_height = 0; - try - { - m_db->pop_block(popped_block, popped_txs); - } - // anything that could cause this to throw is likely catastrophic, - // so we re-throw - catch (const std::exception& e) - { - LOG_ERROR("Error popping block from blockchain: " << e.what()); - throw; - } - catch (...) 
- { - LOG_ERROR("Error popping block from blockchain, throwing!"); - throw; - } + block popped_block; + std::vector popped_txs; - // return transactions from popped block to the tx_pool - for (transaction& tx : popped_txs) - { - if (!is_coinbase(tx)) + try { - cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc); - bool r = m_tx_pool.add_tx(tx, tvc, true); - if (!r) - { - LOG_ERROR("Error returning transaction to tx_pool"); - } + m_db->pop_block(popped_block, popped_txs); + } + // anything that could cause this to throw is likely catastrophic, + // so we re-throw + catch (const std::exception& e) + { + LOG_ERROR("Error popping block from blockchain: " << e.what()); + throw; + } + catch (...) + { + LOG_ERROR("Error popping block from blockchain, throwing!"); + throw; } - } - return popped_block; + // return transactions from popped block to the tx_pool + for (transaction& tx : popped_txs) + { + if (!is_coinbase(tx)) + { + cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc); + bool r = m_tx_pool.add_tx(tx, tvc, true); + if (!r) + { + LOG_ERROR("Error returning transaction to tx_pool"); + } + } + } + + return popped_block; } //------------------------------------------------------------------ bool Blockchain::reset_and_set_genesis_block(const block& b) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - m_transactions.clear(); - m_spent_keys.clear(); - m_blocks.clear(); - m_blocks_index.clear(); - m_alternative_chains.clear(); - m_outputs.clear(); - m_db->reset(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + m_transactions.clear(); + m_spent_keys.clear(); + m_blocks.clear(); + m_blocks_index.clear(); + m_alternative_chains.clear(); + m_outputs.clear(); + m_db->reset(); - block_verification_context bvc = boost::value_initialized(); - add_new_block(b, bvc); - return bvc.m_added_to_main_chain && !bvc.m_verifivation_failed; + block_verification_context bvc = boost::value_initialized(); + add_new_block(b, bvc); + return bvc.m_added_to_main_chain && !bvc.m_verifivation_failed; } //------------------------------------------------------------------ //TODO: move to BlockchainDB subclass bool Blockchain::purge_transaction_keyimages_from_blockchain(const transaction& tx, bool strict_check) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); struct purge_transaction_visitor: public boost::static_visitor - { - key_images_container& m_spent_keys; - bool m_strict_check; - purge_transaction_visitor(key_images_container& spent_keys, bool strict_check):m_spent_keys(spent_keys), m_strict_check(strict_check){} + { + key_images_container& m_spent_keys; + bool m_strict_check; + purge_transaction_visitor(key_images_container& spent_keys, bool strict_check) : + m_spent_keys(spent_keys), m_strict_check(strict_check) + { + } - bool operator()(const txin_to_key& inp) const - { - //const crypto::key_image& ki = inp.k_image; - auto r = m_spent_keys.find(inp.k_image); - if(r != m_spent_keys.end()) - { - m_spent_keys.erase(r); - }else - { - CHECK_AND_ASSERT_MES(!m_strict_check, false, "purge_block_data_from_blockchain: key image in transaction not found"); - } - return true; - } - bool operator()(const txin_gen& inp) const - { - return true; - } - bool operator()(const txin_to_script& tx) const - { - return false; - } + bool operator()(const txin_to_key& inp) const + { + //const crypto::key_image& 
ki = inp.k_image; + auto r = m_spent_keys.find(inp.k_image); + if(r != m_spent_keys.end()) + { + m_spent_keys.erase(r); + } + else + { + CHECK_AND_ASSERT_MES(!m_strict_check, false, "purge_block_data_from_blockchain: key image in transaction not found"); + } + return true; + } + bool operator()(const txin_gen& inp) const + { + return true; + } + bool operator()(const txin_to_script& tx) const + { + return false; + } - bool operator()(const txin_to_scripthash& tx) const - { - return false; - } - }; + bool operator()(const txin_to_scripthash& tx) const + { + return false; + } + }; - BOOST_FOREACH(const txin_v& in, tx.vin) - { - bool r = boost::apply_visitor(purge_transaction_visitor(m_spent_keys, strict_check), in); - CHECK_AND_ASSERT_MES(!strict_check || r, false, "failed to process purge_transaction_visitor"); - } - return true; + BOOST_FOREACH(const txin_v& in, tx.vin) + { + bool r = boost::apply_visitor(purge_transaction_visitor(m_spent_keys, strict_check), in); + CHECK_AND_ASSERT_MES(!strict_check || r, false, "failed to process purge_transaction_visitor"); + } + return true; } //------------------------------------------------------------------ crypto::hash Blockchain::get_tail_id(uint64_t& height) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - height = m_db->height() - 1; - return get_tail_id(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + height = m_db->height() - 1; + return get_tail_id(); } //------------------------------------------------------------------ crypto::hash Blockchain::get_tail_id() const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - return m_db->top_block_hash(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + return m_db->top_block_hash(); } //------------------------------------------------------------------ /*TODO: this function was...poorly written. 
As such, I'm not entirely @@ -480,119 +562,120 @@ crypto::hash Blockchain::get_tail_id() const */ bool Blockchain::get_short_chain_history(std::list& ids) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - uint64_t i = 0; - uint64_t current_multiplier = 1; - uint64_t sz = m_db->height(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + uint64_t i = 0; + uint64_t current_multiplier = 1; + uint64_t sz = m_db->height(); + + if(!sz) + return true; + + bool genesis_included = false; + uint64_t current_back_offset = 1; + while(current_back_offset < sz) + { + ids.push_back(m_db->get_block_hash_from_height(sz - current_back_offset)); + + if(sz-current_back_offset == 0) + { + genesis_included = true; + } + if(i < 10) + { + ++current_back_offset; + } + else + { + current_multiplier *= 2; + current_back_offset += current_multiplier; + } + ++i; + } + + if (!genesis_included) + { + ids.push_back(m_db->get_block_hash_from_height(0)); + } - if(!sz) return true; - - bool genesis_included = false; - uint64_t current_back_offset = 1; - while(current_back_offset < sz) - { - ids.push_back(m_db->get_block_hash_from_height(sz - current_back_offset)); - - if(sz-current_back_offset == 0) - { - genesis_included = true; - } - if(i < 10) - { - ++current_back_offset; - } - else - { - current_multiplier *= 2; - current_back_offset += current_multiplier; - } - ++i; - } - - if (!genesis_included) - { - ids.push_back(m_db->get_block_hash_from_height(0)); - } - - return true; } //------------------------------------------------------------------ crypto::hash Blockchain::get_block_id_by_height(uint64_t height) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - try - { - return m_db->get_block_hash_from_height(height); - } - catch (const BLOCK_DNE& e) - { - } - catch (const std::exception& e) - { - LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height: ") + e.what()); - throw; - } - catch (...) - { - LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height")); - throw; - } - return null_hash; + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + try + { + return m_db->get_block_hash_from_height(height); + } + catch (const BLOCK_DNE& e) + { + } + catch (const std::exception& e) + { + LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height: ") + e.what()); + throw; + } + catch (...) 
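get_short_chain_history above samples block hashes densely near the chain tip and then with exponentially growing gaps, so a peer can locate the common fork point with O(log n) identifiers. A minimal sketch that reproduces just the height sequence (hypothetical helper; heights instead of hashes, same loop structure as the original):

#include <cstdint>
#include <vector>

// Heights that would be sampled for a chain of the given height: the last
// 10 blocks one by one, then strides that double each step, always
// finishing with the genesis block.
std::vector<uint64_t> short_history_heights(uint64_t chain_height)
{
  std::vector<uint64_t> heights;
  if (chain_height == 0)
    return heights;

  uint64_t i = 0, multiplier = 1, back_offset = 1;
  bool genesis_included = false;
  while (back_offset < chain_height)
  {
    heights.push_back(chain_height - back_offset);
    if (chain_height - back_offset == 0)
      genesis_included = true;
    if (i < 10)
      ++back_offset;
    else
    {
      multiplier *= 2;
      back_offset += multiplier;
    }
    ++i;
  }
  if (!genesis_included)
    heights.push_back(0);
  return heights;
}

For chain_height = 100 this yields 99, 98, ..., 90, 89, 87, 83, 75, 59, 27 and finally 0.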
+ { + LOG_PRINT_L0(std::string("Something went wrong fetching block hash by height")); + throw; + } + return null_hash; } //------------------------------------------------------------------ bool Blockchain::get_block_by_hash(const crypto::hash &h, block &blk) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // try to find block in main chain - try - { - blk = m_db->get_block(h); - return true; - } - // try to find block in alternative chain - catch (const BLOCK_DNE& e) - { - blocks_ext_by_hash::const_iterator it_alt = m_alternative_chains.find(h); - if (m_alternative_chains.end() != it_alt) { - blk = it_alt->second.bl; - return true; + // try to find block in main chain + try + { + blk = m_db->get_block(h); + return true; + } + // try to find block in alternative chain + catch (const BLOCK_DNE& e) + { + blocks_ext_by_hash::const_iterator it_alt = m_alternative_chains.find(h); + if (m_alternative_chains.end() != it_alt) + { + blk = it_alt->second.bl; + return true; + } + } + catch (const std::exception& e) + { + LOG_PRINT_L0(std::string("Something went wrong fetching block by hash: ") + e.what()); + throw; + } + catch (...) + { + LOG_PRINT_L0(std::string("Something went wrong fetching block hash by hash")); + throw; } - } - catch (const std::exception& e) - { - LOG_PRINT_L0(std::string("Something went wrong fetching block by hash: ") + e.what()); - throw; - } - catch (...) - { - LOG_PRINT_L0(std::string("Something went wrong fetching block hash by hash")); - throw; - } - return false; + return false; } //------------------------------------------------------------------ //FIXME: this function does not seem to be called from anywhere, but // if it ever is, should probably change std::list for std::vector void Blockchain::get_all_known_block_ids(std::list &main, std::list &alt, std::list &invalid) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - for (auto& a : m_db->get_hashes_range(0, m_db->height() - 1)) - { - main.push_back(a); - } + for (auto& a : m_db->get_hashes_range(0, m_db->height() - 1)) + { + main.push_back(a); + } - BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_alternative_chains) + BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_alternative_chains) alt.push_back(v.first); - BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_invalid_blocks) + BOOST_FOREACH(const blocks_ext_by_hash::value_type &v, m_invalid_blocks) invalid.push_back(v.first); } //------------------------------------------------------------------ @@ -600,27 +683,53 @@ void Blockchain::get_all_known_block_ids(std::list &main, std::lis // last DIFFICULTY_BLOCKS_COUNT blocks and passes them to next_difficulty, // returning the result of that call. Ignores the genesis block, and can use // less blocks than desired if there aren't enough. 
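The "ND: Speedup" hunk that follows avoids re-reading DIFFICULTY_BLOCKS_COUNT rows from the database on every call by caching the previous window and, when exactly one block has been added since the last query, pushing one new entry and trimming the front; anything else (reorg, popped block, first call) rebuilds the window. A minimal sketch of that caching idea with hypothetical names, where fetch stands in for the two m_db getters:

#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <utility>

// Sliding-window cache of (timestamp, cumulative difficulty) pairs.
// WINDOW plays the role of DIFFICULTY_BLOCKS_COUNT.
struct difficulty_window_cache
{
  static constexpr size_t WINDOW = 735;
  uint64_t cached_height = 0;          // chain height the window was built for
  std::deque<uint64_t> timestamps;
  std::deque<uint64_t> cumulative_difficulties;

  void update(uint64_t height,
              const std::function<std::pair<uint64_t, uint64_t>(uint64_t)>& fetch)
  {
    if (cached_height != 0 && height == cached_height + 1)
    {
      // Common case: exactly one new block -- push it and trim the front.
      std::pair<uint64_t, uint64_t> p = fetch(height - 1);
      timestamps.push_back(p.first);
      cumulative_difficulties.push_back(p.second);
      while (timestamps.size() > WINDOW) timestamps.pop_front();
      while (cumulative_difficulties.size() > WINDOW) cumulative_difficulties.pop_front();
    }
    else
    {
      // Cache miss: rebuild the whole window, skipping the genesis block.
      timestamps.clear();
      cumulative_difficulties.clear();
      uint64_t start = height > WINDOW ? height - WINDOW : 1;
      for (uint64_t i = start; i < height; ++i)
      {
        std::pair<uint64_t, uint64_t> p = fetch(i);
        timestamps.push_back(p.first);
        cumulative_difficulties.push_back(p.second);
      }
    }
    cached_height = height;
  }
};

The payoff is one database read per new block on the fast path instead of 735, at the cost of having to invalidate the cache (m_timestamps_and_difficulties_height = 0 in the hunks below) whenever the chain is rolled back or reorganized.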
-difficulty_type Blockchain::get_difficulty_for_next_block() const +difficulty_type Blockchain::get_difficulty_for_next_block() { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - std::vector timestamps; - std::vector cumulative_difficulties; - auto h = m_db->height(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + std::vector timestamps; + std::vector difficulties; + auto height = m_db->height(); + // ND: Speedup + // 1. Keep a list of the last 735 (or less) blocks that is used to compute difficulty, + // then when the next block difficulty is queried, push the latest height data and + // pop the oldest one from the list. This only requires 1x read per height instead + // of doing 735 (DIFFICULTY_BLOCKS_COUNT). + if (m_timestamps_and_difficulties_height != 0 && ((height - m_timestamps_and_difficulties_height) == 1)) + { + uint64_t index = height - 1; + m_timestamps.push_back(m_db->get_block_timestamp(index)); + m_difficulties.push_back(m_db->get_block_cumulative_difficulty(index)); - size_t offset = h - std::min(h, static_cast(DIFFICULTY_BLOCKS_COUNT)); + while (m_timestamps.size() > DIFFICULTY_BLOCKS_COUNT) + m_timestamps.erase(m_timestamps.begin()); + while (m_difficulties.size() > DIFFICULTY_BLOCKS_COUNT) + m_difficulties.erase(m_difficulties.begin()); - if (offset == 0) - { - ++offset; - } + m_timestamps_and_difficulties_height = height; + timestamps = m_timestamps; + difficulties = m_difficulties; + } + else + { + size_t offset = height - std::min < size_t > (height, static_cast(DIFFICULTY_BLOCKS_COUNT)); + if (offset == 0) + ++offset; - for(; offset < h; offset++) - { - timestamps.push_back(m_db->get_block_timestamp(offset)); - cumulative_difficulties.push_back(m_db->get_block_cumulative_difficulty(offset)); - } - return next_difficulty(timestamps, cumulative_difficulties); + timestamps.clear(); + difficulties.clear(); + for (; offset < height; offset++) + { + timestamps.push_back(m_db->get_block_timestamp(offset)); + difficulties.push_back(m_db->get_block_cumulative_difficulty(offset)); + } + + m_timestamps_and_difficulties_height = height; + m_timestamps = timestamps; + m_difficulties = difficulties; + } + + return next_difficulty(timestamps, difficulties); } //------------------------------------------------------------------ // This function removes blocks from the blockchain until it gets to the @@ -628,186 +737,186 @@ difficulty_type Blockchain::get_difficulty_for_next_block() const // that had been removed. bool Blockchain::rollback_blockchain_switching(std::list& original_chain, uint64_t rollback_height) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // remove blocks from blockchain until we get back to where we should be. - while (m_db->height() != rollback_height) - { - pop_block_from_blockchain(); - } + m_timestamps_and_difficulties_height = 0; - //return back original chain - for (auto& bl : original_chain) - { - block_verification_context bvc = boost::value_initialized(); - bool r = handle_block_to_main_chain(bl, bvc); - CHECK_AND_ASSERT_MES(r && bvc.m_added_to_main_chain, false, "PANIC! failed to add (again) block while chain switching during the rollback!"); - } + // remove blocks from blockchain until we get back to where we should be. 
+ while (m_db->height() != rollback_height) + { + pop_block_from_blockchain(); + } - LOG_PRINT_L1("Rollback to height " << rollback_height << " was successful."); - if (original_chain.size()) - { - LOG_PRINT_L1("Restoration to previous blockchain successful as well."); - } - return true; + //return back original chain + for (auto& bl : original_chain) + { + block_verification_context bvc = boost::value_initialized(); + bool r = handle_block_to_main_chain(bl, bvc); + CHECK_AND_ASSERT_MES(r && bvc.m_added_to_main_chain, false, "PANIC! failed to add (again) block while chain switching during the rollback!"); + } + + LOG_PRINT_L1("Rollback to height " << rollback_height << " was successful."); + if (original_chain.size()) + { + LOG_PRINT_L1("Restoration to previous blockchain successful as well."); + } + return true; } //------------------------------------------------------------------ // This function attempts to switch to an alternate chain, returning // boolean based on success therein. bool Blockchain::switch_to_alternative_blockchain(std::list& alt_chain, bool discard_disconnected_chain) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // if empty alt chain passed (not sure how that could happen), return false - CHECK_AND_ASSERT_MES(alt_chain.size(), false, "switch_to_alternative_blockchain: empty chain passed"); + m_timestamps_and_difficulties_height = 0; - // verify that main chain has front of alt chain's parent block - if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id)) - { - LOG_ERROR("Attempting to move to an alternate chain, but it doesn't appear to connect to the main chain!"); - return false; - } + // if empty alt chain passed (not sure how that could happen), return false + CHECK_AND_ASSERT_MES(alt_chain.size(), false, "switch_to_alternative_blockchain: empty chain passed"); - // pop blocks from the blockchain until the top block is the parent - // of the front block of the alt chain. - std::list disconnected_chain; - while (m_db->top_block_hash() != alt_chain.front()->second.bl.prev_id) - { - block b = pop_block_from_blockchain(); - disconnected_chain.push_front(b); - } - - auto split_height = m_db->height(); - - //connecting new alternative chain - for(auto alt_ch_iter = alt_chain.begin(); alt_ch_iter != alt_chain.end(); alt_ch_iter++) - { - auto ch_ent = *alt_ch_iter; - block_verification_context bvc = boost::value_initialized(); - - // add block to main chain - bool r = handle_block_to_main_chain(ch_ent->second.bl, bvc); - - // if adding block to main chain failed, rollback to previous state and - // return false - if(!r || !bvc.m_added_to_main_chain) + // verify that main chain has front of alt chain's parent block + if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id)) { - LOG_PRINT_L1("Failed to switch to alternative blockchain"); - - // rollback_blockchain_switching should be moved to two different - // functions: rollback and apply_chain, but for now we pretend it is - // just the latter (because the rollback was done above). - rollback_blockchain_switching(disconnected_chain, m_db->height()); - - // FIXME: Why do we keep invalid blocks around? Possibly in case we hear - // about them again so we can immediately dismiss them, but needs some - // looking into. 
- add_block_as_invalid(ch_ent->second, get_block_hash(ch_ent->second.bl)); - LOG_PRINT_L1("The block was inserted as invalid while connecting new alternative chain, block_id: " << get_block_hash(ch_ent->second.bl)); - m_alternative_chains.erase(ch_ent); - - for(auto alt_ch_to_orph_iter = ++alt_ch_iter; alt_ch_to_orph_iter != alt_chain.end(); alt_ch_to_orph_iter++) - { - add_block_as_invalid((*alt_ch_iter)->second, (*alt_ch_iter)->first); - m_alternative_chains.erase(*alt_ch_to_orph_iter); - } - return false; + LOG_ERROR("Attempting to move to an alternate chain, but it doesn't appear to connect to the main chain!"); + return false; } - } - // if we're to keep the disconnected blocks, add them as alternates - if(!discard_disconnected_chain) - { - //pushing old chain as alternative chain - for (auto& old_ch_ent : disconnected_chain) + // pop blocks from the blockchain until the top block is the parent + // of the front block of the alt chain. + std::list disconnected_chain; + while (m_db->top_block_hash() != alt_chain.front()->second.bl.prev_id) { - block_verification_context bvc = boost::value_initialized(); - bool r = handle_alternative_block(old_ch_ent, get_block_hash(old_ch_ent), bvc); - if(!r) - { - LOG_PRINT_L1("Failed to push ex-main chain blocks to alternative chain "); - // previously this would fail the blockchain switching, but I don't - // think this is bad enough to warrant that. - } + block b = pop_block_from_blockchain(); + disconnected_chain.push_front(b); } - } - //removing alt_chain entries from alternative chain - BOOST_FOREACH(auto ch_ent, alt_chain) - { - m_alternative_chains.erase(ch_ent); - } + auto split_height = m_db->height(); - LOG_PRINT_GREEN("REORGANIZE SUCCESS! on height: " << split_height << ", new blockchain size: " << m_db->height(), LOG_LEVEL_0); - return true; + //connecting new alternative chain + for(auto alt_ch_iter = alt_chain.begin(); alt_ch_iter != alt_chain.end(); alt_ch_iter++) + { + auto ch_ent = *alt_ch_iter; + block_verification_context bvc = boost::value_initialized(); + + // add block to main chain + bool r = handle_block_to_main_chain(ch_ent->second.bl, bvc); + + // if adding block to main chain failed, rollback to previous state and + // return false + if(!r || !bvc.m_added_to_main_chain) + { + LOG_PRINT_L1("Failed to switch to alternative blockchain"); + + // rollback_blockchain_switching should be moved to two different + // functions: rollback and apply_chain, but for now we pretend it is + // just the latter (because the rollback was done above). + rollback_blockchain_switching(disconnected_chain, m_db->height()); + + // FIXME: Why do we keep invalid blocks around? Possibly in case we hear + // about them again so we can immediately dismiss them, but needs some + // looking into. 
+ add_block_as_invalid(ch_ent->second, get_block_hash(ch_ent->second.bl)); + LOG_PRINT_L1("The block was inserted as invalid while connecting new alternative chain, block_id: " << get_block_hash(ch_ent->second.bl)); + m_alternative_chains.erase(ch_ent); + + for(auto alt_ch_to_orph_iter = ++alt_ch_iter; alt_ch_to_orph_iter != alt_chain.end(); alt_ch_to_orph_iter++) + { + add_block_as_invalid((*alt_ch_iter)->second, (*alt_ch_iter)->first); + m_alternative_chains.erase(*alt_ch_to_orph_iter); + } + return false; + } + } + + // if we're to keep the disconnected blocks, add them as alternates + if(!discard_disconnected_chain) + { + //pushing old chain as alternative chain + for (auto& old_ch_ent : disconnected_chain) + { + block_verification_context bvc = boost::value_initialized(); + bool r = handle_alternative_block(old_ch_ent, get_block_hash(old_ch_ent), bvc); + if(!r) + { + LOG_PRINT_L1("Failed to push ex-main chain blocks to alternative chain "); + // previously this would fail the blockchain switching, but I don't + // think this is bad enough to warrant that. + } + } + } + + //removing alt_chain entries from alternative chain + BOOST_FOREACH(auto ch_ent, alt_chain) + { + m_alternative_chains.erase(ch_ent); + } + + LOG_PRINT_GREEN("REORGANIZE SUCCESS! on height: " << split_height << ", new blockchain size: " << m_db->height(), LOG_LEVEL_0); + return true; } //------------------------------------------------------------------ // This function calculates the difficulty target for the block being added to // an alternate chain. difficulty_type Blockchain::get_next_difficulty_for_alternative_chain(const std::list& alt_chain, block_extended_info& bei) const { - LOG_PRINT_L3("Blockchain::" << __func__); - std::vector timestamps; - std::vector cumulative_difficulties; + LOG_PRINT_L3("Blockchain::" << __func__); + std::vector timestamps; + std::vector cumulative_difficulties; - // if the alt chain isn't long enough to calculate the difficulty target - // based on its blocks alone, need to get more blocks from the main chain - if(alt_chain.size()< DIFFICULTY_BLOCKS_COUNT) - { - CRITICAL_REGION_LOCAL(m_blockchain_lock); - - // Figure out start and stop offsets for main chain blocks - size_t main_chain_stop_offset = alt_chain.size() ? alt_chain.front()->second.height : bei.height; - size_t main_chain_count = DIFFICULTY_BLOCKS_COUNT - std::min(static_cast(DIFFICULTY_BLOCKS_COUNT), alt_chain.size()); - main_chain_count = std::min(main_chain_count, main_chain_stop_offset); - size_t main_chain_start_offset = main_chain_stop_offset - main_chain_count; - - if(!main_chain_start_offset) - ++main_chain_start_offset; //skip genesis block - - // get difficulties and timestamps from relevant main chain blocks - for(; main_chain_start_offset < main_chain_stop_offset; ++main_chain_start_offset) + // if the alt chain isn't long enough to calculate the difficulty target + // based on its blocks alone, need to get more blocks from the main chain + if(alt_chain.size()< DIFFICULTY_BLOCKS_COUNT) { - timestamps.push_back(m_db->get_block_timestamp(main_chain_start_offset)); - cumulative_difficulties.push_back(m_db->get_block_cumulative_difficulty(main_chain_start_offset)); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + + // Figure out start and stop offsets for main chain blocks + size_t main_chain_stop_offset = alt_chain.size() ? 
alt_chain.front()->second.height : bei.height; + size_t main_chain_count = DIFFICULTY_BLOCKS_COUNT - std::min(static_cast(DIFFICULTY_BLOCKS_COUNT), alt_chain.size()); + main_chain_count = std::min(main_chain_count, main_chain_stop_offset); + size_t main_chain_start_offset = main_chain_stop_offset - main_chain_count; + + if(!main_chain_start_offset) + ++main_chain_start_offset; //skip genesis block + + // get difficulties and timestamps from relevant main chain blocks + for(; main_chain_start_offset < main_chain_stop_offset; ++main_chain_start_offset) + { + timestamps.push_back(m_db->get_block_timestamp(main_chain_start_offset)); + cumulative_difficulties.push_back(m_db->get_block_cumulative_difficulty(main_chain_start_offset)); + } + + // make sure we haven't accidentally grabbed too many blocks...maybe don't need this check? + CHECK_AND_ASSERT_MES((alt_chain.size() + timestamps.size()) <= DIFFICULTY_BLOCKS_COUNT, false, "Internal error, alt_chain.size()[" << alt_chain.size() << "] + vtimestampsec.size()[" << timestamps.size() << "] NOT <= DIFFICULTY_WINDOW[]" << DIFFICULTY_BLOCKS_COUNT); + + for (auto it : alt_chain) + { + timestamps.push_back(it->second.bl.timestamp); + cumulative_difficulties.push_back(it->second.cumulative_difficulty); + } + } + // if the alt chain is long enough for the difficulty calc, grab difficulties + // and timestamps from it alone + else + { + timestamps.resize(static_cast(DIFFICULTY_BLOCKS_COUNT)); + cumulative_difficulties.resize(static_cast(DIFFICULTY_BLOCKS_COUNT)); + size_t count = 0; + size_t max_i = timestamps.size()-1; + // get difficulties and timestamps from most recent blocks in alt chain + BOOST_REVERSE_FOREACH(auto it, alt_chain) + { + timestamps[max_i - count] = it->second.bl.timestamp; + cumulative_difficulties[max_i - count] = it->second.cumulative_difficulty; + count++; + if(count >= DIFFICULTY_BLOCKS_COUNT) + break; + } } - // make sure we haven't accidentally grabbed too many blocks...maybe don't need this check? 
- CHECK_AND_ASSERT_MES((alt_chain.size() + timestamps.size()) <= DIFFICULTY_BLOCKS_COUNT, false, - "Internal error, alt_chain.size()[" << alt_chain.size() - << "] + vtimestampsec.size()[" << timestamps.size() - << "] NOT <= DIFFICULTY_WINDOW[]" << DIFFICULTY_BLOCKS_COUNT - ); - - for (auto it : alt_chain) - { - timestamps.push_back(it->second.bl.timestamp); - cumulative_difficulties.push_back(it->second.cumulative_difficulty); - } - } - // if the alt chain is long enough for the difficulty calc, grab difficulties - // and timestamps from it alone - else - { - timestamps.resize(static_cast(DIFFICULTY_BLOCKS_COUNT)); - cumulative_difficulties.resize(static_cast(DIFFICULTY_BLOCKS_COUNT)); - size_t count = 0; - size_t max_i = timestamps.size()-1; - // get difficulties and timestamps from most recent blocks in alt chain - BOOST_REVERSE_FOREACH(auto it, alt_chain) - { - timestamps[max_i - count] = it->second.bl.timestamp; - cumulative_difficulties[max_i - count] = it->second.cumulative_difficulty; - count++; - if(count >= DIFFICULTY_BLOCKS_COUNT) - break; - } - } - - // calculate the difficulty target for the block and return it - return next_difficulty(timestamps, cumulative_difficulties); + // calculate the difficulty target for the block and return it + return next_difficulty(timestamps, cumulative_difficulties); } //------------------------------------------------------------------ // This function does a sanity check on basic things that all miner @@ -817,85 +926,82 @@ difficulty_type Blockchain::get_next_difficulty_for_alternative_chain(const std: // a non-overflowing tx amount (dubious necessity on this check) bool Blockchain::prevalidate_miner_transaction(const block& b, uint64_t height) { - LOG_PRINT_L3("Blockchain::" << __func__); - CHECK_AND_ASSERT_MES(b.miner_tx.vin.size() == 1, false, "coinbase transaction in the block has no inputs"); - CHECK_AND_ASSERT_MES(b.miner_tx.vin[0].type() == typeid(txin_gen), false, "coinbase transaction in the block has the wrong type"); - if(boost::get(b.miner_tx.vin[0]).height != height) - { - LOG_PRINT_RED_L1("The miner transaction in block has invalid height: " << boost::get(b.miner_tx.vin[0]).height << ", expected: " << height); - return false; - } - CHECK_AND_ASSERT_MES(b.miner_tx.unlock_time == height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW, - false, - "coinbase transaction transaction has the wrong unlock time=" << b.miner_tx.unlock_time << ", expected " << height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW); + LOG_PRINT_L3("Blockchain::" << __func__); + CHECK_AND_ASSERT_MES(b.miner_tx.vin.size() == 1, false, "coinbase transaction in the block has no inputs"); + CHECK_AND_ASSERT_MES(b.miner_tx.vin[0].type() == typeid(txin_gen), false, "coinbase transaction in the block has the wrong type"); + if(boost::get(b.miner_tx.vin[0]).height != height) + { + LOG_PRINT_RED_L1("The miner transaction in block has invalid height: " << boost::get(b.miner_tx.vin[0]).height << ", expected: " << height); + return false; + } + CHECK_AND_ASSERT_MES(b.miner_tx.unlock_time == height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW, false, "coinbase transaction transaction has the wrong unlock time=" << b.miner_tx.unlock_time << ", expected " << height + CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW); + //check outs overflow + //NOTE: not entirely sure this is necessary, given that this function is + // designed simply to make sure the total amount for a transaction + // does not overflow a uint64_t, and this transaction *is* a uint64_t... 
+ if(!check_outs_overflow(b.miner_tx)) + { + LOG_PRINT_RED_L1("miner transaction has money overflow in block " << get_block_hash(b)); + return false; + } - //check outs overflow - //NOTE: not entirely sure this is necessary, given that this function is - // designed simply to make sure the total amount for a transaction - // does not overflow a uint64_t, and this transaction *is* a uint64_t... - if(!check_outs_overflow(b.miner_tx)) - { - LOG_PRINT_RED_L1("miner transaction has money overflow in block " << get_block_hash(b)); - return false; - } - - return true; + return true; } //------------------------------------------------------------------ // This function validates the miner transaction reward bool Blockchain::validate_miner_transaction(const block& b, size_t cumulative_block_size, uint64_t fee, uint64_t& base_reward, uint64_t already_generated_coins) { - LOG_PRINT_L3("Blockchain::" << __func__); - //validate reward - uint64_t money_in_use = 0; - BOOST_FOREACH(auto& o, b.miner_tx.vout) + LOG_PRINT_L3("Blockchain::" << __func__); + //validate reward + uint64_t money_in_use = 0; + BOOST_FOREACH(auto& o, b.miner_tx.vout) money_in_use += o.amount; - std::vector last_blocks_sizes; - get_last_n_blocks_sizes(last_blocks_sizes, CRYPTONOTE_REWARD_BLOCKS_WINDOW); - if (!get_block_reward(epee::misc_utils::median(last_blocks_sizes), cumulative_block_size, already_generated_coins, base_reward)) { - LOG_PRINT_L1("block size " << cumulative_block_size << " is bigger than allowed for this blockchain"); - return false; - } - if(base_reward + fee < money_in_use) - { - LOG_PRINT_L1("coinbase transaction spend too much money (" << print_money(money_in_use) << "). Block reward is " << print_money(base_reward + fee) << "(" << print_money(base_reward) << "+" << print_money(fee) << ")"); - return false; - } - if(base_reward + fee != money_in_use) - { - LOG_PRINT_L1("coinbase transaction doesn't use full amount of block reward: spent: " - << money_in_use << ", block reward " << base_reward + fee << "(" << base_reward << "+" << fee << ")"); - return false; - } - return true; + std::vector last_blocks_sizes; + get_last_n_blocks_sizes(last_blocks_sizes, CRYPTONOTE_REWARD_BLOCKS_WINDOW); + if (!get_block_reward(epee::misc_utils::median(last_blocks_sizes), cumulative_block_size, already_generated_coins, base_reward)) + { + LOG_PRINT_L1("block size " << cumulative_block_size << " is bigger than allowed for this blockchain"); + return false; + } + if(base_reward + fee < money_in_use) + { + LOG_PRINT_L1("coinbase transaction spend too much money (" << print_money(money_in_use) << "). Block reward is " << print_money(base_reward + fee) << "(" << print_money(base_reward) << "+" << print_money(fee) << ")"); + return false; + } + if(base_reward + fee != money_in_use) + { + LOG_PRINT_L1("coinbase transaction doesn't use full amount of block reward: spent: " << money_in_use << ", block reward " << base_reward + fee << "(" << base_reward << "+" << fee << ")"); + return false; + } + return true; } //------------------------------------------------------------------ // get the block sizes of the last blocks, starting at // and return by reference . 
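The validate_miner_transaction hunk above reduces to one equality: the sum of the coinbase outputs must exactly match base_reward + fee, where base_reward is derived from the median size of the last CRYPTONOTE_REWARD_BLOCKS_WINDOW blocks. A condensed sketch of the amount check alone (hypothetical helper; it folds the "spends too much" and "doesn't use full reward" branches into a single test):

#include <cstdint>
#include <numeric>
#include <vector>

// Check a coinbase payout against the allowed reward: paying out more than
// base_reward + fee is invalid, and paying out less is also rejected,
// because the full reward must be claimed.
bool coinbase_amount_ok(const std::vector<uint64_t>& coinbase_outputs,
                        uint64_t base_reward, uint64_t fee)
{
  uint64_t money_in_use = std::accumulate(coinbase_outputs.begin(),
                                          coinbase_outputs.end(), uint64_t(0));
  return money_in_use == base_reward + fee;
}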
void Blockchain::get_last_n_blocks_sizes(std::vector& sz, size_t count) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - auto h = m_db->height(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + auto h = m_db->height(); - // this function is meaningless for an empty blockchain...granted it should never be empty - if(h == 0) - return; + // this function is meaningless for an empty blockchain...granted it should never be empty + if(h == 0) + return; - // add size of last blocks to vector (or less, if blockchain size < count) - size_t start_offset = h - std::min(h, count); - for(size_t i = start_offset; i < h; i++) - { - sz.push_back(m_db->get_block_size(i)); - } + // add size of last blocks to vector (or less, if blockchain size < count) + size_t start_offset = h - std::min(h, count); + for(size_t i = start_offset; i < h; i++) + { + sz.push_back(m_db->get_block_size(i)); + } } //------------------------------------------------------------------ uint64_t Blockchain::get_current_cumulative_blocksize_limit() const { - LOG_PRINT_L3("Blockchain::" << __func__); - return m_current_block_cumul_sz_limit; + LOG_PRINT_L3("Blockchain::" << __func__); + return m_current_block_cumul_sz_limit; } //------------------------------------------------------------------ //TODO: This function only needed minor modification to work with BlockchainDB, @@ -909,146 +1015,160 @@ uint64_t Blockchain::get_current_cumulative_blocksize_limit() const // in a lot of places. That flag is not referenced in any of the code // nor any of the makefiles, howeve. Need to look into whether or not it's // necessary at all. -bool Blockchain::create_block_template(block& b, const account_public_address& miner_address, difficulty_type& diffic, uint64_t& height, const blobdata& ex_nonce) const +bool Blockchain::create_block_template(block& b, const account_public_address& miner_address, difficulty_type& diffic, uint64_t& height, const blobdata& ex_nonce) { - LOG_PRINT_L3("Blockchain::" << __func__); - size_t median_size; - uint64_t already_generated_coins; + LOG_PRINT_L3("Blockchain::" << __func__); + size_t median_size; + uint64_t already_generated_coins; - CRITICAL_REGION_BEGIN(m_blockchain_lock); - b.major_version = CURRENT_BLOCK_MAJOR_VERSION; - b.minor_version = CURRENT_BLOCK_MINOR_VERSION; - b.prev_id = get_tail_id(); - b.timestamp = time(NULL); + CRITICAL_REGION_BEGIN(m_blockchain_lock); + b.major_version = CURRENT_BLOCK_MAJOR_VERSION; + b.minor_version = CURRENT_BLOCK_MINOR_VERSION; + b.prev_id = get_tail_id(); + b.timestamp = time(NULL); - height = m_db->height(); - diffic = get_difficulty_for_next_block(); - CHECK_AND_ASSERT_MES(diffic, false, "difficulty owverhead."); + height = m_db->height(); + diffic = get_difficulty_for_next_block(); + CHECK_AND_ASSERT_MES(diffic, false, "difficulty owverhead."); - median_size = m_current_block_cumul_sz_limit / 2; - already_generated_coins = m_db->get_block_already_generated_coins(height - 1); + median_size = m_current_block_cumul_sz_limit / 2; + already_generated_coins = m_db->get_block_already_generated_coins(height - 1); - CRITICAL_REGION_END(); + CRITICAL_REGION_END(); - size_t txs_size; - uint64_t fee; - if (!m_tx_pool.fill_block_template(b, median_size, already_generated_coins, txs_size, fee)) { - return false; - } + size_t txs_size; + uint64_t fee; + if (!m_tx_pool.fill_block_template(b, median_size, already_generated_coins, txs_size, fee)) + { + return false; + } #if 
defined(DEBUG_CREATE_BLOCK_TEMPLATE) - size_t real_txs_size = 0; - uint64_t real_fee = 0; - CRITICAL_REGION_BEGIN(m_tx_pool.m_transactions_lock); - BOOST_FOREACH(crypto::hash &cur_hash, b.tx_hashes) { - auto cur_res = m_tx_pool.m_transactions.find(cur_hash); - if (cur_res == m_tx_pool.m_transactions.end()) { - LOG_ERROR("Creating block template: error: transaction not found"); - continue; + size_t real_txs_size = 0; + uint64_t real_fee = 0; + CRITICAL_REGION_BEGIN(m_tx_pool.m_transactions_lock); + BOOST_FOREACH(crypto::hash &cur_hash, b.tx_hashes) + { + auto cur_res = m_tx_pool.m_transactions.find(cur_hash); + if (cur_res == m_tx_pool.m_transactions.end()) + { + LOG_ERROR("Creating block template: error: transaction not found"); + continue; + } + tx_memory_pool::tx_details &cur_tx = cur_res->second; + real_txs_size += cur_tx.blob_size; + real_fee += cur_tx.fee; + if (cur_tx.blob_size != get_object_blobsize(cur_tx.tx)) + { + LOG_ERROR("Creating block template: error: invalid transaction size"); + } + uint64_t inputs_amount; + if (!get_inputs_money_amount(cur_tx.tx, inputs_amount)) + { + LOG_ERROR("Creating block template: error: cannot get inputs amount"); + } + else if (cur_tx.fee != inputs_amount - get_outs_money_amount(cur_tx.tx)) + { + LOG_ERROR("Creating block template: error: invalid fee"); + } } - tx_memory_pool::tx_details &cur_tx = cur_res->second; - real_txs_size += cur_tx.blob_size; - real_fee += cur_tx.fee; - if (cur_tx.blob_size != get_object_blobsize(cur_tx.tx)) { - LOG_ERROR("Creating block template: error: invalid transaction size"); + if (txs_size != real_txs_size) + { + LOG_ERROR("Creating block template: error: wrongly calculated transaction size"); } - uint64_t inputs_amount; - if (!get_inputs_money_amount(cur_tx.tx, inputs_amount)) { - LOG_ERROR("Creating block template: error: cannot get inputs amount"); - } else if (cur_tx.fee != inputs_amount - get_outs_money_amount(cur_tx.tx)) { - LOG_ERROR("Creating block template: error: invalid fee"); + if (fee != real_fee) + { + LOG_ERROR("Creating block template: error: wrongly calculated fee"); } - } - if (txs_size != real_txs_size) { - LOG_ERROR("Creating block template: error: wrongly calculated transaction size"); - } - if (fee != real_fee) { - LOG_ERROR("Creating block template: error: wrongly calculated fee"); - } - CRITICAL_REGION_END(); - LOG_PRINT_L1("Creating block template: height " << height << - ", median size " << median_size << - ", already generated coins " << already_generated_coins << - ", transaction size " << txs_size << - ", fee " << fee); + CRITICAL_REGION_END(); + LOG_PRINT_L1("Creating block template: height " << height << + ", median size " << median_size << + ", already generated coins " << already_generated_coins << + ", transaction size " << txs_size << + ", fee " << fee); #endif - /* + /* two-phase miner transaction generation: we don't know exact block size until we prepare block, but we don't know reward until we know block size, so first miner transaction generated with fake amount of money, and with phase we know think we know expected block size - */ - //make blocks coin-base tx looks close to real coinbase tx to get truthful blob size - bool r = construct_miner_tx(height, median_size, already_generated_coins, txs_size, fee, miner_address, b.miner_tx, ex_nonce, 11); - CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, first chance"); - size_t cumulative_size = txs_size + get_object_blobsize(b.miner_tx); + */ + //make blocks coin-base tx looks close to real coinbase tx to get truthful 
blob size + bool r = construct_miner_tx(height, median_size, already_generated_coins, txs_size, fee, miner_address, b.miner_tx, ex_nonce, 11); + CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, first chance"); + size_t cumulative_size = txs_size + get_object_blobsize(b.miner_tx); #if defined(DEBUG_CREATE_BLOCK_TEMPLATE) - LOG_PRINT_L1("Creating block template: miner tx size " << get_object_blobsize(b.miner_tx) << - ", cumulative size " << cumulative_size); + LOG_PRINT_L1("Creating block template: miner tx size " << get_object_blobsize(b.miner_tx) << + ", cumulative size " << cumulative_size); #endif - for (size_t try_count = 0; try_count != 10; ++try_count) { - r = construct_miner_tx(height, median_size, already_generated_coins, cumulative_size, fee, miner_address, b.miner_tx, ex_nonce, 11); + for (size_t try_count = 0; try_count != 10; ++try_count) + { + r = construct_miner_tx(height, median_size, already_generated_coins, cumulative_size, fee, miner_address, b.miner_tx, ex_nonce, 11); - CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, second chance"); - size_t coinbase_blob_size = get_object_blobsize(b.miner_tx); - if (coinbase_blob_size > cumulative_size - txs_size) { - cumulative_size = txs_size + coinbase_blob_size; + CHECK_AND_ASSERT_MES(r, false, "Failed to construc miner tx, second chance"); + size_t coinbase_blob_size = get_object_blobsize(b.miner_tx); + if (coinbase_blob_size > cumulative_size - txs_size) + { + cumulative_size = txs_size + coinbase_blob_size; #if defined(DEBUG_CREATE_BLOCK_TEMPLATE) - LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size << - ", cumulative size " << cumulative_size << " is greater then before"); + LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size << + ", cumulative size " << cumulative_size << " is greater then before"); #endif - continue; - } - - if (coinbase_blob_size < cumulative_size - txs_size) { - size_t delta = cumulative_size - txs_size - coinbase_blob_size; -#if defined(DEBUG_CREATE_BLOCK_TEMPLATE) - LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size << - ", cumulative size " << txs_size + coinbase_blob_size << - " is less then before, adding " << delta << " zero bytes"); -#endif - b.miner_tx.extra.insert(b.miner_tx.extra.end(), delta, 0); - //here could be 1 byte difference, because of extra field counter is varint, and it can become from 1-byte len to 2-bytes len. 
- if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx)) { - CHECK_AND_ASSERT_MES(cumulative_size + 1 == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " + 1 is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx)); - b.miner_tx.extra.resize(b.miner_tx.extra.size() - 1); - if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx)) { - //fuck, not lucky, -1 makes varint-counter size smaller, in that case we continue to grow with cumulative_size - LOG_PRINT_RED("Miner tx creation has no luck with delta_extra size = " << delta << " and " << delta - 1 , LOG_LEVEL_2); - cumulative_size += delta - 1; - continue; + continue; } - LOG_PRINT_GREEN("Setting extra for block: " << b.miner_tx.extra.size() << ", try_count=" << try_count, LOG_LEVEL_1); - } - } - CHECK_AND_ASSERT_MES(cumulative_size == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx)); + + if (coinbase_blob_size < cumulative_size - txs_size) + { + size_t delta = cumulative_size - txs_size - coinbase_blob_size; #if defined(DEBUG_CREATE_BLOCK_TEMPLATE) - LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size << - ", cumulative size " << cumulative_size << " is now good"); + LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size << + ", cumulative size " << txs_size + coinbase_blob_size << + " is less then before, adding " << delta << " zero bytes"); #endif - return true; - } - LOG_ERROR("Failed to create_block_template with " << 10 << " tries"); - return false; + b.miner_tx.extra.insert(b.miner_tx.extra.end(), delta, 0); + //here could be 1 byte difference, because of extra field counter is varint, and it can become from 1-byte len to 2-bytes len. 
+ if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx)) + { + CHECK_AND_ASSERT_MES(cumulative_size + 1 == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " + 1 is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx)); + b.miner_tx.extra.resize(b.miner_tx.extra.size() - 1); + if (cumulative_size != txs_size + get_object_blobsize(b.miner_tx)) + { + //fuck, not lucky, -1 makes varint-counter size smaller, in that case we continue to grow with cumulative_size + LOG_PRINT_RED("Miner tx creation has no luck with delta_extra size = " << delta << " and " << delta - 1 , LOG_LEVEL_2); + cumulative_size += delta - 1; + continue; + } + LOG_PRINT_GREEN("Setting extra for block: " << b.miner_tx.extra.size() << ", try_count=" << try_count, LOG_LEVEL_1); + } + } + CHECK_AND_ASSERT_MES(cumulative_size == txs_size + get_object_blobsize(b.miner_tx), false, "unexpected case: cumulative_size=" << cumulative_size << " is not equal txs_cumulative_size=" << txs_size << " + get_object_blobsize(b.miner_tx)=" << get_object_blobsize(b.miner_tx)); +#if defined(DEBUG_CREATE_BLOCK_TEMPLATE) + LOG_PRINT_L1("Creating block template: miner tx size " << coinbase_blob_size << + ", cumulative size " << cumulative_size << " is now good"); +#endif + return true; + } + LOG_ERROR("Failed to create_block_template with " << 10 << " tries"); + return false; } //------------------------------------------------------------------ // for an alternate chain, get the timestamps from the main chain to complete // the needed number of timestamps for the BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW. bool Blockchain::complete_timestamps_vector(uint64_t start_top_height, std::vector& timestamps) { - LOG_PRINT_L3("Blockchain::" << __func__); + LOG_PRINT_L3("Blockchain::" << __func__); - if(timestamps.size() >= BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW) + if(timestamps.size() >= BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW) + return true; + + CRITICAL_REGION_LOCAL(m_blockchain_lock); + size_t need_elements = BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW - timestamps.size(); + CHECK_AND_ASSERT_MES(start_top_height < m_db->height(), false, "internal error: passed start_height not < " << " m_db->height() -- " << start_top_height << " >= " << m_db->height()); + size_t stop_offset = start_top_height > need_elements ? start_top_height - need_elements : 0; + while (start_top_height != stop_offset) + { + timestamps.push_back(m_db->get_block_timestamp(start_top_height)); + --start_top_height; + } return true; - - CRITICAL_REGION_LOCAL(m_blockchain_lock); - size_t need_elements = BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW - timestamps.size(); - CHECK_AND_ASSERT_MES(start_top_height < m_db->height(), false, "internal error: passed start_height not < " << " m_db->height() -- " << start_top_height << " >= " << m_db->height()); - size_t stop_offset = start_top_height > need_elements ? start_top_height - need_elements : 0; - while (start_top_height != stop_offset) - { - timestamps.push_back(m_db->get_block_timestamp(start_top_height)); - --start_top_height; - } - return true; } //------------------------------------------------------------------ // If a block is to be added and its parent block is not the current @@ -1059,228 +1179,215 @@ bool Blockchain::complete_timestamps_vector(uint64_t start_top_height, std::vect // a long forked chain eventually. 
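The create_block_template changes above keep the original two-phase approach: build the coinbase once to learn how large the reward serialization is, rebuild it with the updated total block size, and pad tx.extra with zero bytes until the reserved size and the real size agree. A stripped-down sketch of that fixed-point loop, with hypothetical types and a build callback standing in for construct_miner_tx; the one-byte varint corner case handled in the real code is deliberately omitted:

#include <cstddef>
#include <vector>

// Toy coinbase: only its serialized size matters for this sketch.
struct fake_coinbase
{
  std::vector<unsigned char> blob;
  size_t size() const { return blob.size(); }
};

// build(total_block_size) returns a coinbase whose size depends on the
// reward encoded for that total size; txs_size is the pool transactions' size.
template <typename BuildFn>
bool fit_coinbase(size_t txs_size, BuildFn build, fake_coinbase& tx)
{
  tx = build(txs_size);                         // first pass, no coinbase reserved yet
  size_t cumulative_size = txs_size + tx.size();
  for (int try_count = 0; try_count != 10; ++try_count)
  {
    tx = build(cumulative_size);
    size_t coinbase_size = tx.size();
    if (coinbase_size > cumulative_size - txs_size)
    {
      // Reward serialization grew: reserve more room and try again.
      cumulative_size = txs_size + coinbase_size;
      continue;
    }
    if (coinbase_size < cumulative_size - txs_size)
    {
      // Reward serialization shrank: pad with zero bytes up to the reservation.
      tx.blob.insert(tx.blob.end(), cumulative_size - txs_size - coinbase_size, 0);
    }
    return cumulative_size == txs_size + tx.size();
  }
  return false;
}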
bool Blockchain::handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - - uint64_t block_height = get_block_height(b); - if(0 == block_height) - { - LOG_PRINT_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative), but miner tx says height is 0."); - bvc.m_verifivation_failed = true; - return false; - } - // this basically says if the blockchain is smaller than the first - // checkpoint then alternate blocks are allowed. Alternatively, if the - // last checkpoint *before* the end of the current chain is also before - // the block to be added, then this is fine. - if (!m_checkpoints.is_alternative_block_allowed(get_current_blockchain_height(), block_height)) - { - LOG_PRINT_RED_L1("Block with id: " << id - << std::endl << " can't be accepted for alternative chain, block height: " << block_height - << std::endl << " blockchain height: " << get_current_blockchain_height()); - bvc.m_verifivation_failed = true; - return false; - } - - //block is not related with head of main chain - //first of all - look in alternative chains container - auto it_prev = m_alternative_chains.find(b.prev_id); - bool parent_in_main = m_db->block_exists(b.prev_id); - if(it_prev != m_alternative_chains.end() || parent_in_main) - { - //we have new block in alternative chain - - //build alternative subchain, front -> mainchain, back -> alternative head - blocks_ext_by_hash::iterator alt_it = it_prev; //m_alternative_chains.find() - std::list alt_chain; - std::vector timestamps; - while(alt_it != m_alternative_chains.end()) + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + m_timestamps_and_difficulties_height = 0; + uint64_t block_height = get_block_height(b); + if(0 == block_height) { - alt_chain.push_front(alt_it); - timestamps.push_back(alt_it->second.bl.timestamp); - alt_it = m_alternative_chains.find(alt_it->second.bl.prev_id); - } - - // if block to be added connects to known blocks that aren't part of the - // main chain -- that is, if we're adding on to an alternate chain - if(alt_chain.size()) - { - // make sure alt chain doesn't somehow start past the end of the main chain - CHECK_AND_ASSERT_MES(m_db->height() > alt_chain.front()->second.height, false, "main blockchain wrong height"); - - // make sure that the blockchain contains the block that should connect - // this alternate chain with it. - if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id)) - { - LOG_PRINT_L1("alternate chain does not appear to connect to main chain..."); + LOG_PRINT_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative), but miner tx says height is 0."); + bvc.m_verifivation_failed = true; return false; - } - - // make sure block connects correctly to the main chain - auto h = m_db->get_block_hash_from_height(alt_chain.front()->second.height - 1); - CHECK_AND_ASSERT_MES(h == alt_chain.front()->second.bl.prev_id, false, "alternative chain has wrong connection to main chain"); - complete_timestamps_vector(m_db->get_block_height(alt_chain.front()->second.bl.prev_id), timestamps); } - // if block not associated with known alternate chain - else + // this basically says if the blockchain is smaller than the first + // checkpoint then alternate blocks are allowed. Alternatively, if the + // last checkpoint *before* the end of the current chain is also before + // the block to be added, then this is fine. 
+ if (!m_checkpoints.is_alternative_block_allowed(get_current_blockchain_height(), block_height)) { - // if block parent is not part of main chain or an alternate chain, - // we ignore it - CHECK_AND_ASSERT_MES(parent_in_main, false, "internal error: broken imperative condition it_main_prev != m_blocks_index.end()"); - - complete_timestamps_vector(m_db->get_block_height(b.prev_id), timestamps); + LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " can't be accepted for alternative chain, block height: " << block_height << std::endl << " blockchain height: " << get_current_blockchain_height()); + bvc.m_verifivation_failed = true; + return false; } - // verify that the block's timestamp is within the acceptable range - // (not earlier than the median of the last X blocks) - if(!check_block_timestamp(timestamps, b)) + //block is not related with head of main chain + //first of all - look in alternative chains container + auto it_prev = m_alternative_chains.find(b.prev_id); + bool parent_in_main = m_db->block_exists(b.prev_id); + if(it_prev != m_alternative_chains.end() || parent_in_main) { - LOG_PRINT_RED_L1("Block with id: " << id - << std::endl << " for alternative chain, has invalid timestamp: " << b.timestamp); - bvc.m_verifivation_failed = true; - return false; - } + //we have new block in alternative chain - // FIXME: consider moving away from block_extended_info at some point - block_extended_info bei = boost::value_initialized(); - bei.bl = b; - bei.height = alt_chain.size() ? it_prev->second.height + 1 : m_db->get_block_height(b.prev_id) + 1; + //build alternative subchain, front -> mainchain, back -> alternative head + blocks_ext_by_hash::iterator alt_it = it_prev; //m_alternative_chains.find() + std::list alt_chain; + std::vector timestamps; + while(alt_it != m_alternative_chains.end()) + { + alt_chain.push_front(alt_it); + timestamps.push_back(alt_it->second.bl.timestamp); + alt_it = m_alternative_chains.find(alt_it->second.bl.prev_id); + } - bool is_a_checkpoint; - if(!m_checkpoints.check_block(bei.height, id, is_a_checkpoint)) - { - LOG_ERROR("CHECKPOINT VALIDATION FAILED"); - bvc.m_verifivation_failed = true; - return false; - } + // if block to be added connects to known blocks that aren't part of the + // main chain -- that is, if we're adding on to an alternate chain + if(alt_chain.size()) + { + // make sure alt chain doesn't somehow start past the end of the main chain + CHECK_AND_ASSERT_MES(m_db->height() > alt_chain.front()->second.height, false, "main blockchain wrong height"); - // Check the block's hash against the difficulty target for its alt chain - m_is_in_checkpoint_zone = false; - difficulty_type current_diff = get_next_difficulty_for_alternative_chain(alt_chain, bei); - CHECK_AND_ASSERT_MES(current_diff, false, "!!!!!!! DIFFICULTY OVERHEAD !!!!!!!"); - crypto::hash proof_of_work = null_hash; - get_block_longhash(bei.bl, proof_of_work, bei.height); - if(!check_hash(proof_of_work, current_diff)) - { - LOG_PRINT_RED_L1("Block with id: " << id - << std::endl << " for alternative chain, does not have enough proof of work: " << proof_of_work - << std::endl << " expected difficulty: " << current_diff); - bvc.m_verifivation_failed = true; - return false; - } + // make sure that the blockchain contains the block that should connect + // this alternate chain with it. 
+ if (!m_db->block_exists(alt_chain.front()->second.bl.prev_id)) + { + LOG_PRINT_L1("alternate chain does not appear to connect to main chain..."); + return false; + } - if(!prevalidate_miner_transaction(b, bei.height)) - { - LOG_PRINT_RED_L1("Block with id: " << epee::string_tools::pod_to_hex(id) - << " (as alternative) has incorrect miner transaction."); - bvc.m_verifivation_failed = true; - return false; + // make sure block connects correctly to the main chain + auto h = m_db->get_block_hash_from_height(alt_chain.front()->second.height - 1); + CHECK_AND_ASSERT_MES(h == alt_chain.front()->second.bl.prev_id, false, "alternative chain has wrong connection to main chain"); + complete_timestamps_vector(m_db->get_block_height(alt_chain.front()->second.bl.prev_id), timestamps); + } + // if block not associated with known alternate chain + else + { + // if block parent is not part of main chain or an alternate chain, + // we ignore it + CHECK_AND_ASSERT_MES(parent_in_main, false, "internal error: broken imperative condition it_main_prev != m_blocks_index.end()"); - } + complete_timestamps_vector(m_db->get_block_height(b.prev_id), timestamps); + } - // FIXME: - // this brings up an interesting point: consider allowing to get block - // difficulty both by height OR by hash, not just height. - difficulty_type main_chain_cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->height() - 1); - if (alt_chain.size()) - { - bei.cumulative_difficulty = it_prev->second.cumulative_difficulty; + // verify that the block's timestamp is within the acceptable range + // (not earlier than the median of the last X blocks) + if(!check_block_timestamp(timestamps, b)) + { + LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " for alternative chain, has invalid timestamp: " << b.timestamp); + bvc.m_verifivation_failed = true; + return false; + } + + // FIXME: consider moving away from block_extended_info at some point + block_extended_info bei = boost::value_initialized(); + bei.bl = b; + bei.height = alt_chain.size() ? it_prev->second.height + 1 : m_db->get_block_height(b.prev_id) + 1; + + bool is_a_checkpoint; + if(!m_checkpoints.check_block(bei.height, id, is_a_checkpoint)) + { + LOG_ERROR("CHECKPOINT VALIDATION FAILED"); + bvc.m_verifivation_failed = true; + return false; + } + + // Check the block's hash against the difficulty target for its alt chain + m_is_in_checkpoint_zone = false; + difficulty_type current_diff = get_next_difficulty_for_alternative_chain(alt_chain, bei); + CHECK_AND_ASSERT_MES(current_diff, false, "!!!!!!! DIFFICULTY OVERHEAD !!!!!!!"); + crypto::hash proof_of_work = null_hash; + get_block_longhash(bei.bl, proof_of_work, bei.height); + if(!check_hash(proof_of_work, current_diff)) + { + LOG_PRINT_RED_L1("Block with id: " << id << std::endl << " for alternative chain, does not have enough proof of work: " << proof_of_work << std::endl << " expected difficulty: " << current_diff); + bvc.m_verifivation_failed = true; + return false; + } + + if(!prevalidate_miner_transaction(b, bei.height)) + { + LOG_PRINT_RED_L1("Block with id: " << epee::string_tools::pod_to_hex(id) << " (as alternative) has incorrect miner transaction."); + bvc.m_verifivation_failed = true; + return false; + + } + + // FIXME: + // this brings up an interesting point: consider allowing to get block + // difficulty both by height OR by hash, not just height. 
+ difficulty_type main_chain_cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->height() - 1); + if (alt_chain.size()) + { + bei.cumulative_difficulty = it_prev->second.cumulative_difficulty; + } + else + { + // passed-in block's previous block's cumulative difficulty, found on the main chain + bei.cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->get_block_height(b.prev_id)); + } + bei.cumulative_difficulty += current_diff; + + // add block to alternate blocks storage, + // as well as the current "alt chain" container + auto i_res = m_alternative_chains.insert(blocks_ext_by_hash::value_type(id, bei)); + CHECK_AND_ASSERT_MES(i_res.second, false, "insertion of new alternative block returned as it already exist"); + alt_chain.push_back(i_res.first); + + // FIXME: is it even possible for a checkpoint to show up not on the main chain? + if(is_a_checkpoint) + { + //do reorganize! + LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << ", checkpoint is found in alternative chain on height " << bei.height, LOG_LEVEL_0); + + bool r = switch_to_alternative_blockchain(alt_chain, true); + + bvc.m_added_to_main_chain = r; + bvc.m_verifivation_failed = !r; + + return r; + } + else if(main_chain_cumulative_difficulty < bei.cumulative_difficulty) //check if difficulty bigger then in main chain + { + //do reorganize! + LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << " with cum_difficulty " << m_db->get_block_cumulative_difficulty(m_db->height() - 1) << std::endl << " alternative blockchain size: " << alt_chain.size() << " with cum_difficulty " << bei.cumulative_difficulty, LOG_LEVEL_0); + + bool r = switch_to_alternative_blockchain(alt_chain, false); + if (r) + bvc.m_added_to_main_chain = true; + else + bvc.m_verifivation_failed = true; + return r; + } + else + { + LOG_PRINT_BLUE("----- BLOCK ADDED AS ALTERNATIVE ON HEIGHT " << bei.height << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "difficulty:\t" << current_diff, LOG_LEVEL_0); + return true; + } } else { - // passed-in block's previous block's cumulative difficulty, found on the main chain - bei.cumulative_difficulty = m_db->get_block_cumulative_difficulty(m_db->get_block_height(b.prev_id)); + //block orphaned + bvc.m_marked_as_orphaned = true; + LOG_PRINT_RED_L1("Block recognized as orphaned and rejected, id = " << id); } - bei.cumulative_difficulty += current_diff; - // add block to alternate blocks storage, - // as well as the current "alt chain" container - auto i_res = m_alternative_chains.insert(blocks_ext_by_hash::value_type(id, bei)); - CHECK_AND_ASSERT_MES(i_res.second, false, "insertion of new alternative block returned as it already exist"); - alt_chain.push_back(i_res.first); - - // FIXME: is it even possible for a checkpoint to show up not on the main chain? - if(is_a_checkpoint) - { - //do reorganize! - LOG_PRINT_GREEN("###### REORGANIZE on height: " << alt_chain.front()->second.height << " of " << m_db->height() - 1 << - ", checkpoint is found in alternative chain on height " << bei.height, LOG_LEVEL_0); - - bool r = switch_to_alternative_blockchain(alt_chain, true); - - bvc.m_added_to_main_chain = r; - bvc.m_verifivation_failed = !r; - - return r; - } - else if(main_chain_cumulative_difficulty < bei.cumulative_difficulty) //check if difficulty bigger then in main chain - { - //do reorganize! 
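Once the alternative block's cumulative difficulty is known, the reorganization decision in the hunk above reduces to a single comparison, with a checkpoint on the alternative chain forcing the switch. A condensed sketch, with difficulty_type reduced to a plain integer:

#include <cstdint>

// Decide whether an alternative chain should replace the main chain.
// is_a_checkpoint forces the switch; otherwise the alt chain wins only if its
// cumulative difficulty exceeds the main chain's cumulative difficulty at the tip.
bool should_reorganize(uint64_t main_tip_cumulative_difficulty,
                       uint64_t alt_parent_cumulative_difficulty,
                       uint64_t alt_block_difficulty,
                       bool is_a_checkpoint)
{
    const uint64_t alt_cumulative_difficulty =
        alt_parent_cumulative_difficulty + alt_block_difficulty;

    if (is_a_checkpoint)
        return true;     // checkpoint found on the alternative chain
    return main_tip_cumulative_difficulty < alt_cumulative_difficulty;
}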
- LOG_PRINT_GREEN("###### REORGANIZE on height: " - << alt_chain.front()->second.height << " of " << m_db->height() - 1 - << " with cum_difficulty " << m_db->get_block_cumulative_difficulty(m_db->height() - 1) - << std::endl << " alternative blockchain size: " << alt_chain.size() - << " with cum_difficulty " << bei.cumulative_difficulty, LOG_LEVEL_0 - ); - - bool r = switch_to_alternative_blockchain(alt_chain, false); - if(r) bvc.m_added_to_main_chain = true; - else bvc.m_verifivation_failed = true; - return r; - } - else - { - LOG_PRINT_BLUE("----- BLOCK ADDED AS ALTERNATIVE ON HEIGHT " << bei.height - << std::endl << "id:\t" << id - << std::endl << "PoW:\t" << proof_of_work - << std::endl << "difficulty:\t" << current_diff, LOG_LEVEL_0); - return true; - } - } - else - { - //block orphaned - bvc.m_marked_as_orphaned = true; - LOG_PRINT_RED_L1("Block recognized as orphaned and rejected, id = " << id); - } - - return true; + return true; } //------------------------------------------------------------------ bool Blockchain::get_blocks(uint64_t start_offset, size_t count, std::list& blocks, std::list& txs) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - if(start_offset > m_db->height()) - return false; + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + if(start_offset > m_db->height()) + return false; - if (!get_blocks(start_offset, count, blocks)) - { - return false; - } + if (!get_blocks(start_offset, count, blocks)) + { + return false; + } - for(const block& blk : blocks) - { - std::list missed_ids; - get_transactions(blk.tx_hashes, txs, missed_ids); - CHECK_AND_ASSERT_MES(!missed_ids.size(), false, "has missed transactions in own block in main blockchain"); - } + for(const block& blk : blocks) + { + std::list missed_ids; + get_transactions(blk.tx_hashes, txs, missed_ids); + CHECK_AND_ASSERT_MES(!missed_ids.size(), false, "has missed transactions in own block in main blockchain"); + } - return true; + return true; } //------------------------------------------------------------------ bool Blockchain::get_blocks(uint64_t start_offset, size_t count, std::list& blocks) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - if(start_offset > m_db->height()) - return false; + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + if(start_offset > m_db->height()) + return false; - for(size_t i = start_offset; i < start_offset + count && i <= m_db->height();i++) - { - blocks.push_back(m_db->get_block_from_height(i)); - } - return true; + for(size_t i = start_offset; i < start_offset + count && i <= m_db->height();i++) + { + blocks.push_back(m_db->get_block_from_height(i)); + } + return true; } //------------------------------------------------------------------ //TODO: This function *looks* like it won't need to be rewritten @@ -1288,67 +1395,68 @@ bool Blockchain::get_blocks(uint64_t start_offset, size_t count, std::list blocks; - get_blocks(arg.blocks, blocks, rsp.missed_ids); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + rsp.current_blockchain_height = get_current_blockchain_height(); + std::list blocks; + get_blocks(arg.blocks, blocks, rsp.missed_ids); - BOOST_FOREACH(const auto& bl, blocks) - { - std::list missed_tx_id; + BOOST_FOREACH(const auto& bl, blocks) + { + std::list missed_tx_id; + std::list txs; + get_transactions(bl.tx_hashes, txs, rsp.missed_ids); + 
CHECK_AND_ASSERT_MES(!missed_tx_id.size(), false, "Internal error: has missed missed_tx_id.size()=" << missed_tx_id.size() + << std::endl << "for block id = " << get_block_hash(bl)); + rsp.blocks.push_back(block_complete_entry()); + block_complete_entry& e = rsp.blocks.back(); + //pack block + e.block = t_serializable_object_to_blob(bl); + //pack transactions + BOOST_FOREACH(transaction& tx, txs) + e.txs.push_back(t_serializable_object_to_blob(tx)); + + } + //get another transactions, if need std::list txs; - get_transactions(bl.tx_hashes, txs, rsp.missed_ids); - CHECK_AND_ASSERT_MES(!missed_tx_id.size(), false, "Internal error: has missed missed_tx_id.size()=" << missed_tx_id.size() - << std::endl << "for block id = " << get_block_hash(bl)); - rsp.blocks.push_back(block_complete_entry()); - block_complete_entry& e = rsp.blocks.back(); - //pack block - e.block = t_serializable_object_to_blob(bl); - //pack transactions - BOOST_FOREACH(transaction& tx, txs) - e.txs.push_back(t_serializable_object_to_blob(tx)); - - } - //get another transactions, if need - std::list txs; - get_transactions(arg.txs, txs, rsp.missed_ids); - //pack aside transactions - BOOST_FOREACH(const auto& tx, txs) + get_transactions(arg.txs, txs, rsp.missed_ids); + //pack aside transactions + BOOST_FOREACH(const auto& tx, txs) rsp.txs.push_back(t_serializable_object_to_blob(tx)); - return true; + return true; } //------------------------------------------------------------------ bool Blockchain::get_alternative_blocks(std::list& blocks) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - BOOST_FOREACH(const auto& alt_bl, m_alternative_chains) - { - blocks.push_back(alt_bl.second.bl); - } - return true; + BOOST_FOREACH(const auto& alt_bl, m_alternative_chains) + { + blocks.push_back(alt_bl.second.bl); + } + return true; } //------------------------------------------------------------------ size_t Blockchain::get_alternative_blocks_count() const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - return m_alternative_chains.size(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + return m_alternative_chains.size(); } //------------------------------------------------------------------ // This function adds the output specified by to the result_outs container // unlocked and other such checks should be done by here. 
void Blockchain::add_out_to_get_random_outs(COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs, uint64_t amount, size_t i) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry& oen = *result_outs.outs.insert(result_outs.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry()); - oen.global_amount_index = i; - oen.out_key = m_db->get_output_key(amount, i); + COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry& oen = *result_outs.outs.insert(result_outs.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::out_entry()); + oen.global_amount_index = i; + output_data_t data = m_db->get_output_key(amount, i); + oen.out_key = data.pubkey; } //------------------------------------------------------------------ // This function takes an RPC request for mixins and creates an RPC response @@ -1357,72 +1465,72 @@ void Blockchain::add_out_to_get_random_outs(COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_A // in some cases bool Blockchain::get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request& req, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response& res) const { - LOG_PRINT_L3("Blockchain::" << __func__); - srand(static_cast(time(NULL))); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + srand(static_cast(time(NULL))); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // for each amount that we need to get mixins for, get random outputs - // from BlockchainDB where is req.outs_count (number of mixins). - for (uint64_t amount : req.amounts) - { - // create outs_for_amount struct and populate amount field - COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs = *res.outs.insert(res.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount()); - result_outs.amount = amount; - - std::unordered_set seen_indices; - - // if there aren't enough outputs to mix with (or just enough), - // use all of them. Eventually this should become impossible. - if (m_db->get_num_outputs(amount) <= req.outs_count) + // for each amount that we need to get mixins for, get random outputs + // from BlockchainDB where is req.outs_count (number of mixins). + for (uint64_t amount : req.amounts) { - for (uint64_t i = 0; i < m_db->get_num_outputs(amount); i++) - { - // get tx_hash, tx_out_index from DB - tx_out_index toi = m_db->get_output_tx_and_index(amount, i); + // create outs_for_amount struct and populate amount field + COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs = *res.outs.insert(res.outs.end(), COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount()); + result_outs.amount = amount; - // if tx is unlocked, add output to result_outs - if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first))) + std::unordered_set seen_indices; + + // if there aren't enough outputs to mix with (or just enough), + // use all of them. Eventually this should become impossible. 
+ if (m_db->get_num_outputs(amount) <= req.outs_count) { - add_out_to_get_random_outs(result_outs, amount, i); - } + for (uint64_t i = 0; i < m_db->get_num_outputs(amount); i++) + { + // get tx_hash, tx_out_index from DB + tx_out_index toi = m_db->get_output_tx_and_index(amount, i); - } + // if tx is unlocked, add output to result_outs + if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first))) + { + add_out_to_get_random_outs(result_outs, amount, i); + } + + } + } + else + { + // while we still need more mixins + auto num_outs = m_db->get_num_outputs(amount); + while (result_outs.outs.size() < req.outs_count) + { + // if we've gone through every possible output, we've gotten all we can + if (seen_indices.size() == num_outs) + { + break; + } + + // get a random output index from the DB. If we've already seen it, + // return to the top of the loop and try again, otherwise add it to the + // list of output indices we've seen. + uint64_t i = m_db->get_random_output(amount); + if (seen_indices.count(i)) + { + continue; + } + seen_indices.emplace(i); + + // get tx_hash, tx_out_index from DB + tx_out_index toi = m_db->get_output_tx_and_index(amount, i); + + // if the output's transaction is unlocked, add the output's index to + // our list. + if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first))) + { + add_out_to_get_random_outs(result_outs, amount, i); + } + } + } } - else - { - // while we still need more mixins - auto num_outs = m_db->get_num_outputs(amount); - while (result_outs.outs.size() < req.outs_count) - { - // if we've gone through every possible output, we've gotten all we can - if (seen_indices.size() == num_outs) - { - break; - } - - // get a random output index from the DB. If we've already seen it, - // return to the top of the loop and try again, otherwise add it to the - // list of output indices we've seen. - uint64_t i = m_db->get_random_output(amount); - if (seen_indices.count(i)) - { - continue; - } - seen_indices.emplace(i); - - // get tx_hash, tx_out_index from DB - tx_out_index toi = m_db->get_output_tx_and_index(amount, i); - - // if the output's transaction is unlocked, add the output's index to - // our list. - if (is_tx_spendtime_unlocked(m_db->get_tx_unlock_time(toi.first))) - { - add_out_to_get_random_outs(result_outs, amount, i); - } - } - } - } - return true; + return true; } //------------------------------------------------------------------ // This function takes a list of block hashes from another node @@ -1430,187 +1538,175 @@ bool Blockchain::get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUT // This is used to see what to send another node that needs to sync. bool Blockchain::find_blockchain_supplement(const std::list& qblock_ids, uint64_t& starter_offset) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // make sure the request includes at least the genesis block, otherwise - // how can we expect to sync from the client that the block list came from? 
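The mixin-selection loop above boils down to: take every output when there are no more outputs than requested, otherwise draw random indices, skipping duplicates via a seen-set, until enough are collected or the whole index space has been visited. A self-contained sketch with a standard RNG standing in for m_db->get_random_output() and the unlock check omitted:

#include <cstdint>
#include <random>
#include <unordered_set>
#include <vector>

// Pick up to `want` distinct output indices out of `num_outs` candidates.
std::vector<uint64_t> pick_random_outputs(uint64_t num_outs, uint64_t want, std::mt19937_64& rng)
{
    std::vector<uint64_t> picked;
    if (num_outs <= want)
    {
        for (uint64_t i = 0; i < num_outs; ++i)
            picked.push_back(i);          // not enough to choose from: use them all
        return picked;
    }

    std::unordered_set<uint64_t> seen_indices;
    std::uniform_int_distribution<uint64_t> dist(0, num_outs - 1);
    while (picked.size() < want && seen_indices.size() < num_outs)
    {
        const uint64_t i = dist(rng);
        if (!seen_indices.insert(i).second)
            continue;                     // already tried this index
        picked.push_back(i);              // the real code also filters out locked outputs here
    }
    return picked;
}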
- if(!qblock_ids.size() /*|| !req.m_total_height*/) - { - LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << qblock_ids.size() << /*", m_height=" << req.m_total_height <<*/ ", dropping connection"); - return false; - } - - // make sure that the last block in the request's block list matches - // the genesis block - auto gen_hash = m_db->get_block_hash_from_height(0); - if(qblock_ids.back() != gen_hash) - { - LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: genesis block missmatch: " << std::endl << "id: " - << qblock_ids.back() << ", " << std::endl << "expected: " << gen_hash - << "," << std::endl << " dropping connection"); - return false; - } - - // Find the first block the foreign chain has that we also have. - // Assume qblock_ids is in reverse-chronological order. - auto bl_it = qblock_ids.begin(); - uint64_t split_height = 0; - for(; bl_it != qblock_ids.end(); bl_it++) - { - try + // make sure the request includes at least the genesis block, otherwise + // how can we expect to sync from the client that the block list came from? + if(!qblock_ids.size() /*|| !req.m_total_height*/) { - split_height = m_db->get_block_height(*bl_it); - break; + LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: m_block_ids.size()=" << qblock_ids.size() << /*", m_height=" << req.m_total_height <<*/ ", dropping connection"); + return false; } - catch (const BLOCK_DNE& e) + + // make sure that the last block in the request's block list matches + // the genesis block + auto gen_hash = m_db->get_block_hash_from_height(0); + if(qblock_ids.back() != gen_hash) { - continue; + LOG_PRINT_L1("Client sent wrong NOTIFY_REQUEST_CHAIN: genesis block missmatch: " << std::endl << "id: " << qblock_ids.back() << ", " << std::endl << "expected: " << gen_hash << "," << std::endl << " dropping connection"); + return false; } - catch (const std::exception& e) + + // Find the first block the foreign chain has that we also have. + // Assume qblock_ids is in reverse-chronological order. + auto bl_it = qblock_ids.begin(); + uint64_t split_height = 0; + for(; bl_it != qblock_ids.end(); bl_it++) { - LOG_PRINT_L1("Non-critical error trying to find block by hash in BlockchainDB, hash: " << *bl_it); - return false; + try + { + split_height = m_db->get_block_height(*bl_it); + break; + } + catch (const BLOCK_DNE& e) + { + continue; + } + catch (const std::exception& e) + { + LOG_PRINT_L1("Non-critical error trying to find block by hash in BlockchainDB, hash: " << *bl_it); + return false; + } } - } - // this should be impossible, as we checked that we share the genesis block, - // but just in case... - if(bl_it == qblock_ids.end()) - { - LOG_PRINT_L1("Internal error handling connection, can't find split point"); - return false; - } + // this should be impossible, as we checked that we share the genesis block, + // but just in case... + if(bl_it == qblock_ids.end()) + { + LOG_PRINT_L1("Internal error handling connection, can't find split point"); + return false; + } - // if split_height remains 0, we didn't have any but the genesis block in common - // which is only fine if the blocks just have the genesis block - if(split_height == 0 && qblock_ids.size() > 1) - { - LOG_ERROR("Ours and foreign blockchain have only genesis block in common... 
o.O"); - return false; - } + // if split_height remains 0, we didn't have any but the genesis block in common + // which is only fine if the blocks just have the genesis block + if(split_height == 0 && qblock_ids.size() > 1) + { + LOG_ERROR("Ours and foreign blockchain have only genesis block in common... o.O"); + return false; + } - //we start to put block ids INCLUDING last known id, just to make other side be sure - starter_offset = split_height; - return true; + //we start to put block ids INCLUDING last known id, just to make other side be sure + starter_offset = split_height; + return true; } //------------------------------------------------------------------ uint64_t Blockchain::block_difficulty(uint64_t i) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - try - { - return m_db->get_block_difficulty(i); - } - catch (const BLOCK_DNE& e) - { - LOG_PRINT_L0("Attempted to get block difficulty for height above blockchain height"); - } - return 0; + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + try + { + return m_db->get_block_difficulty(i); + } + catch (const BLOCK_DNE& e) + { + LOG_PRINT_L0("Attempted to get block difficulty for height above blockchain height"); + } + return 0; } //------------------------------------------------------------------ template bool Blockchain::get_blocks(const t_ids_container& block_ids, t_blocks_container& blocks, t_missed_container& missed_bs) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - for (const auto& block_hash : block_ids) - { - try + for (const auto& block_hash : block_ids) { - blocks.push_back(m_db->get_block(block_hash)); + try + { + blocks.push_back(m_db->get_block(block_hash)); + } + catch (const BLOCK_DNE& e) + { + missed_bs.push_back(block_hash); + } + catch (const std::exception& e) + { + return false; + } } - catch (const BLOCK_DNE& e) - { - missed_bs.push_back(block_hash); - } - catch (const std::exception& e) - { - return false; - } - } - return true; + return true; } //------------------------------------------------------------------ template bool Blockchain::get_transactions(const t_ids_container& txs_ids, t_tx_container& txs, t_missed_container& missed_txs) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - for (const auto& tx_hash : txs_ids) - { - try + for (const auto& tx_hash : txs_ids) { - txs.push_back(m_db->get_tx(tx_hash)); + try + { + txs.push_back(m_db->get_tx(tx_hash)); + } + catch (const TX_DNE& e) + { + missed_txs.push_back(tx_hash); + } + //FIXME: is this the correct way to handle this? + catch (const std::exception& e) + { + return false; + } } - catch (const TX_DNE& e) - { - missed_txs.push_back(tx_hash); - } - //FIXME: is this the correct way to handle this? 
- catch (const std::exception& e) - { - return false; - } - } - return true; + return true; } //------------------------------------------------------------------ void Blockchain::print_blockchain(uint64_t start_index, uint64_t end_index) { - LOG_PRINT_L3("Blockchain::" << __func__); - std::stringstream ss; - CRITICAL_REGION_LOCAL(m_blockchain_lock); - auto h = m_db->height(); - if(start_index > h) - { - LOG_PRINT_L1("Wrong starter index set: " << start_index << ", expected max index " << h); - return; - } + LOG_PRINT_L3("Blockchain::" << __func__); + std::stringstream ss; + CRITICAL_REGION_LOCAL(m_blockchain_lock); + auto h = m_db->height(); + if(start_index > h) + { + LOG_PRINT_L1("Wrong starter index set: " << start_index << ", expected max index " << h); + return; + } - for(size_t i = start_index; i <= h && i != end_index; i++) - { - ss << "height " << i - << ", timestamp " << m_db->get_block_timestamp(i) - << ", cumul_dif " << m_db->get_block_cumulative_difficulty(i) - << ", size " << m_db->get_block_size(i) - << "\nid\t\t" << m_db->get_block_hash_from_height(i) - << "\ndifficulty\t\t" << m_db->get_block_difficulty(i) - << ", nonce " << m_db->get_block_from_height(i).nonce - << ", tx_count " << m_db->get_block_from_height(i).tx_hashes.size() - << std::endl; - } - LOG_PRINT_L1("Current blockchain:" << std::endl << ss.str()); - LOG_PRINT_L0("Blockchain printed with log level 1"); + for(size_t i = start_index; i <= h && i != end_index; i++) + { + ss << "height " << i << ", timestamp " << m_db->get_block_timestamp(i) << ", cumul_dif " << m_db->get_block_cumulative_difficulty(i) << ", size " << m_db->get_block_size(i) << "\nid\t\t" << m_db->get_block_hash_from_height(i) << "\ndifficulty\t\t" << m_db->get_block_difficulty(i) << ", nonce " << m_db->get_block_from_height(i).nonce << ", tx_count " << m_db->get_block_from_height(i).tx_hashes.size() << std::endl; + } + LOG_PRINT_L1("Current blockchain:" << std::endl << ss.str()); + LOG_PRINT_L0("Blockchain printed with log level 1"); } //------------------------------------------------------------------ void Blockchain::print_blockchain_index() { - LOG_PRINT_L3("Blockchain::" << __func__); - std::stringstream ss; - CRITICAL_REGION_LOCAL(m_blockchain_lock); - auto height = m_db->height(); - if (height != 0) - { - for(uint64_t i = 0; i <= height; i++) + LOG_PRINT_L3("Blockchain::" << __func__); + std::stringstream ss; + CRITICAL_REGION_LOCAL(m_blockchain_lock); + auto height = m_db->height(); + if (height != 0) { - ss << "height: " << i << ", hash: " << m_db->get_block_hash_from_height(i); + for(uint64_t i = 0; i <= height; i++) + { + ss << "height: " << i << ", hash: " << m_db->get_block_hash_from_height(i); + } } - } - LOG_PRINT_L0("Current blockchain index:" << std::endl - << ss.str() - ); + LOG_PRINT_L0("Current blockchain index:" << std::endl << ss.str()); } //------------------------------------------------------------------ //TODO: remove this function and references to it void Blockchain::print_blockchain_outs(const std::string& file) { - LOG_PRINT_L3("Blockchain::" << __func__); - return; + LOG_PRINT_L3("Blockchain::" << __func__); + return; } //------------------------------------------------------------------ // Find the split point between us and foreign blockchain and return @@ -1618,22 +1714,22 @@ void Blockchain::print_blockchain_outs(const std::string& file) // BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT additional (more recent) hashes. 
bool Blockchain::find_blockchain_supplement(const std::list& qblock_ids, NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // if we can't find the split point, return false - if(!find_blockchain_supplement(qblock_ids, resp.start_height)) - { - return false; - } + // if we can't find the split point, return false + if(!find_blockchain_supplement(qblock_ids, resp.start_height)) + { + return false; + } - resp.total_height = get_current_blockchain_height(); - size_t count = 0; - for(size_t i = resp.start_height; i < resp.total_height && count < BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT; i++, count++) - { - resp.m_block_ids.push_back(m_db->get_block_hash_from_height(i)); - } - return true; + resp.total_height = get_current_blockchain_height(); + size_t count = 0; + for(size_t i = resp.start_height; i < resp.total_height && count < BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT; i++, count++) + { + resp.m_block_ids.push_back(m_db->get_block_hash_from_height(i)); + } + return true; } //------------------------------------------------------------------ //FIXME: change argument to std::vector, low priority @@ -1642,96 +1738,96 @@ bool Blockchain::find_blockchain_supplement(const std::list& qbloc // blocks by reference. bool Blockchain::find_blockchain_supplement(const uint64_t req_start_block, const std::list& qblock_ids, std::list > >& blocks, uint64_t& total_height, uint64_t& start_height, size_t max_count) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - // if a specific start height has been requested - if(req_start_block > 0) - { - // if requested height is higher than our chain, return false -- we can't help - if (req_start_block >= m_db->height()) + // if a specific start height has been requested + if(req_start_block > 0) { - return false; + // if requested height is higher than our chain, return false -- we can't help + if (req_start_block >= m_db->height()) + { + return false; + } + start_height = req_start_block; } - start_height = req_start_block; - } - else - { - if(!find_blockchain_supplement(qblock_ids, start_height)) + else { - return false; + if(!find_blockchain_supplement(qblock_ids, start_height)) + { + return false; + } } - } - total_height = get_current_blockchain_height(); - size_t count = 0; - for(size_t i = start_height; i < total_height && count < max_count; i++, count++) - { - blocks.resize(blocks.size()+1); - blocks.back().first = m_db->get_block_from_height(i); - std::list mis; - get_transactions(blocks.back().first.tx_hashes, blocks.back().second, mis); - CHECK_AND_ASSERT_MES(!mis.size(), false, "internal error, transaction from block not found"); - } - return true; + total_height = get_current_blockchain_height(); + size_t count = 0; + for(size_t i = start_height; i < total_height && count < max_count; i++, count++) + { + blocks.resize(blocks.size()+1); + blocks.back().first = m_db->get_block_from_height(i); + std::list mis; + get_transactions(blocks.back().first.tx_hashes, blocks.back().second, mis); + CHECK_AND_ASSERT_MES(!mis.size(), false, "internal error, transaction from block not found"); + } + return true; } //------------------------------------------------------------------ bool Blockchain::add_block_as_invalid(const block& bl, const crypto::hash& h) { - 
LOG_PRINT_L3("Blockchain::" << __func__); - block_extended_info bei = AUTO_VAL_INIT(bei); - bei.bl = bl; - return add_block_as_invalid(bei, h); + LOG_PRINT_L3("Blockchain::" << __func__); + block_extended_info bei = AUTO_VAL_INIT(bei); + bei.bl = bl; + return add_block_as_invalid(bei, h); } //------------------------------------------------------------------ bool Blockchain::add_block_as_invalid(const block_extended_info& bei, const crypto::hash& h) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - auto i_res = m_invalid_blocks.insert(std::map::value_type(h, bei)); - CHECK_AND_ASSERT_MES(i_res.second, false, "at insertion invalid by tx returned status existed"); - LOG_PRINT_L1("BLOCK ADDED AS INVALID: " << h << std::endl << ", prev_id=" << bei.bl.prev_id << ", m_invalid_blocks count=" << m_invalid_blocks.size()); - return true; + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + auto i_res = m_invalid_blocks.insert(std::map::value_type(h, bei)); + CHECK_AND_ASSERT_MES(i_res.second, false, "at insertion invalid by tx returned status existed"); + LOG_PRINT_L1("BLOCK ADDED AS INVALID: " << h << std::endl << ", prev_id=" << bei.bl.prev_id << ", m_invalid_blocks count=" << m_invalid_blocks.size()); + return true; } //------------------------------------------------------------------ bool Blockchain::have_block(const crypto::hash& id) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); - if(m_db->block_exists(id)) - { - LOG_PRINT_L3("block exists in main chain"); - return true; - } + if(m_db->block_exists(id)) + { + LOG_PRINT_L3("block exists in main chain"); + return true; + } - if(m_alternative_chains.count(id)) - { - LOG_PRINT_L3("block found in m_alternative_chains"); - return true; - } + if(m_alternative_chains.count(id)) + { + LOG_PRINT_L3("block found in m_alternative_chains"); + return true; + } - if(m_invalid_blocks.count(id)) - { - LOG_PRINT_L3("block found in m_invalid_blocks"); - return true; - } + if(m_invalid_blocks.count(id)) + { + LOG_PRINT_L3("block found in m_invalid_blocks"); + return true; + } - return false; + return false; } //------------------------------------------------------------------ bool Blockchain::handle_block_to_main_chain(const block& bl, block_verification_context& bvc) { - LOG_PRINT_L3("Blockchain::" << __func__); - crypto::hash id = get_block_hash(bl); - return handle_block_to_main_chain(bl, id, bvc); + LOG_PRINT_L3("Blockchain::" << __func__); + crypto::hash id = get_block_hash(bl); + return handle_block_to_main_chain(bl, id, bvc); } //------------------------------------------------------------------ size_t Blockchain::get_total_transactions() const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - return m_db->get_tx_count(); + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + return m_db->get_tx_count(); } //------------------------------------------------------------------ // This function checks each input in the transaction to make sure it @@ -1742,245 +1838,372 @@ size_t Blockchain::get_total_transactions() const // remove them later if the block fails validation. 
bool Blockchain::check_for_double_spend(const transaction& tx, key_images_container& keys_this_block) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - struct add_transaction_input_visitor: public boost::static_visitor - { - key_images_container& m_spent_keys; - BlockchainDB* m_db; - add_transaction_input_visitor(key_images_container& spent_keys, BlockchainDB* db):m_spent_keys(spent_keys), m_db(db) - {} - bool operator()(const txin_to_key& in) const + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + struct add_transaction_input_visitor: public boost::static_visitor { - const crypto::key_image& ki = in.k_image; + key_images_container& m_spent_keys; + BlockchainDB* m_db; + add_transaction_input_visitor(key_images_container& spent_keys, BlockchainDB* db) : + m_spent_keys(spent_keys), m_db(db) + { + } + bool operator()(const txin_to_key& in) const + { + const crypto::key_image& ki = in.k_image; - // attempt to insert the newly-spent key into the container of - // keys spent this block. If this fails, the key was spent already - // in this block, return false to flag that a double spend was detected. - // - // if the insert into the block-wide spent keys container succeeds, - // check the blockchain-wide spent keys container and make sure the - // key wasn't used in another block already. - auto r = m_spent_keys.insert(ki); - if(!r.second || m_db->has_key_image(ki)) - { - //double spend detected - return false; - } + // attempt to insert the newly-spent key into the container of + // keys spent this block. If this fails, the key was spent already + // in this block, return false to flag that a double spend was detected. + // + // if the insert into the block-wide spent keys container succeeds, + // check the blockchain-wide spent keys container and make sure the + // key wasn't used in another block already. 
+ auto r = m_spent_keys.insert(ki); + if(!r.second || m_db->has_key_image(ki)) + { + //double spend detected + return false; + } - // if no double-spend detected, return true - return true; + // if no double-spend detected, return true + return true; + } + + bool operator()(const txin_gen& tx) const + { + return true; + } + bool operator()(const txin_to_script& tx) const + { + return false; + } + bool operator()(const txin_to_scripthash& tx) const + { + return false; + } + }; + + for (const txin_v& in : tx.vin) + { + if(!boost::apply_visitor(add_transaction_input_visitor(keys_this_block, m_db), in)) + { + LOG_ERROR("Double spend detected!"); + return false; + } } - bool operator()(const txin_gen& tx) const{return true;} - bool operator()(const txin_to_script& tx) const{return false;} - bool operator()(const txin_to_scripthash& tx) const{return false;} - }; - - for (const txin_v& in : tx.vin) - { - if(!boost::apply_visitor(add_transaction_input_visitor(keys_this_block, m_db), in)) - { - LOG_ERROR("Double spend detected!"); - return false; - } - } - - return true; + return true; } //------------------------------------------------------------------ bool Blockchain::get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector& indexs) const { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - if (!m_db->tx_exists(tx_id)) - { - LOG_PRINT_RED_L1("warning: get_tx_outputs_gindexs failed to find transaction with id = " << tx_id); - return false; - } + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + if (!m_db->tx_exists(tx_id)) + { + LOG_PRINT_RED_L1("warning: get_tx_outputs_gindexs failed to find transaction with id = " << tx_id); + return false; + } - // get amount output indexes, currently referred to in parts as "output global indices", but they are actually specific to amounts - indexs = m_db->get_tx_amount_output_indices(tx_id); - CHECK_AND_ASSERT_MES(indexs.size(), false, "internal error: global indexes for transaction " << tx_id << " is empty"); + // get amount output indexes, currently referred to in parts as "output global indices", but they are actually specific to amounts + indexs = m_db->get_tx_amount_output_indices(tx_id); + CHECK_AND_ASSERT_MES(indexs.size(), false, "internal error: global indexes for transaction " << tx_id << " is empty"); - return true; + return true; } //------------------------------------------------------------------ // This function overloads its sister function with // an extra value (hash of highest block that holds an output used as input) // as a return-by-reference. 
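A sketch of the same double-spend rule with C++17 std::variant/std::visit in place of the boost::static_visitor used above: a key image must be insertable into the per-block set and absent from the chain-wide spent set, coinbase inputs always pass, and script inputs are rejected (omitted here for brevity).

#include <type_traits>
#include <unordered_set>
#include <variant>

struct txin_gen_s    { };                                    // coinbase input
struct txin_to_key_s { unsigned long long k_image; };        // stand-in for crypto::key_image
using txin_variant = std::variant<txin_gen_s, txin_to_key_s>;

// Returns false when a double spend is detected: either the key image was
// already used earlier in this block, or it is already recorded on-chain.
bool check_input(const txin_variant& in,
                 std::unordered_set<unsigned long long>& keys_this_block,
                 const std::unordered_set<unsigned long long>& chain_spent_keys)
{
    return std::visit([&](const auto& v) -> bool {
        using T = std::decay_t<decltype(v)>;
        if constexpr (std::is_same_v<T, txin_gen_s>)
            return true;                                     // miner input, nothing to spend
        else
        {
            if (!keys_this_block.insert(v.k_image).second) return false;  // spent in this block
            if (chain_spent_keys.count(v.k_image))         return false;  // spent in an earlier block
            return true;
        }
    }, in);
}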
-bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t& max_used_block_height, crypto::hash& max_used_block_id) const +bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t& max_used_block_height, crypto::hash& max_used_block_id, bool kept_by_block) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - bool res = check_tx_inputs(tx, &max_used_block_height); - if(!res) return false; - CHECK_AND_ASSERT_MES(max_used_block_height < m_db->height(), false, "internal error: max used block index=" << max_used_block_height << " is not less then blockchain size = " << m_db->height()); - max_used_block_id = m_db->get_block_hash_from_height(max_used_block_height); - return true; + LOG_PRINT_L3("Blockchain::" << __func__); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + +#if defined(PER_BLOCK_CHECKPOINT) + // check if we're doing per-block checkpointing + if (m_db->height() < m_blocks_hash_check.size() && kept_by_block) + { + TIME_MEASURE_START(a); + m_blocks_txs_check.push_back(get_transaction_hash(tx)); + max_used_block_id = null_hash; + max_used_block_height = 0; + TIME_MEASURE_FINISH(a); + if(m_show_time_stats) + LOG_PRINT_L0("HASH: " << "-" << " VIN/VOUT: " << tx.vin.size() << "/" << tx.vout.size() << " H: " << 0 << " chcktx: " << a); + return true; + } +#endif + + TIME_MEASURE_START(a); + bool res = check_tx_inputs(tx, &max_used_block_height); + TIME_MEASURE_FINISH(a); + crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx); + if(m_show_time_stats) + LOG_PRINT_L0("HASH: " << "+" << " VIN/VOUT: " << tx.vin.size() << "/" << tx.vout.size() << " H: " << max_used_block_height << " chcktx: " << a + m_fake_scan_time); + + if (!res) + return false; + + // ND: Speedup: + // 1. keep a list of verified transactions, when the Blockchain tries to check a tx again, + // verify against list and skip if already verified to be correct. + m_check_tx_inputs_table.emplace(tx_prefix_hash, std::make_pair(res, max_used_block_height)); + + CHECK_AND_ASSERT_MES(max_used_block_height < m_db->height(), false, "internal error: max used block index=" << max_used_block_height << " is not less then blockchain size = " << m_db->height()); + max_used_block_id = m_db->get_block_hash_from_height(max_used_block_height); + return true; } //------------------------------------------------------------------ bool Blockchain::have_tx_keyimges_as_spent(const transaction &tx) const { - LOG_PRINT_L3("Blockchain::" << __func__); - BOOST_FOREACH(const txin_v& in, tx.vin) - { - CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, in_to_key, true); - if(have_tx_keyimg_as_spent(in_to_key.k_image)) - return true; - } - return false; + LOG_PRINT_L3("Blockchain::" << __func__); + BOOST_FOREACH(const txin_v& in, tx.vin) + { + CHECKED_GET_SPECIFIC_VARIANT(in, const txin_to_key, in_to_key, true); + if(have_tx_keyimg_as_spent(in_to_key.k_image)) + return true; + } + return false; } //------------------------------------------------------------------ // This function validates transaction inputs and their keys. Previously // it also performed double spend checking, but that has been moved to its // own function. 
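The speedup noted in the hunk above is a plain memo table keyed by the transaction prefix hash; its exact member type is not shown here, but the usage is consistent with a map from hash to (result, max used block height). A minimal sketch with a string standing in for crypto::hash:

#include <cstdint>
#include <string>
#include <unordered_map>
#include <utility>

using tx_check_cache = std::unordered_map<std::string, std::pair<bool, uint64_t>>;

// Returns true if the cache already answers the question, filling `ok` and
// `max_height`; otherwise the caller verifies the tx and records the result.
bool lookup_or_miss(const tx_check_cache& cache, const std::string& tx_prefix_hash,
                    bool& ok, uint64_t& max_height)
{
    auto it = cache.find(tx_prefix_hash);
    if (it == cache.end())
        return false;                    // miss: do the expensive input checks
    ok = it->second.first;
    max_height = it->second.second;
    return true;
}

void record(tx_check_cache& cache, const std::string& tx_prefix_hash, bool ok, uint64_t max_height)
{
    cache.emplace(tx_prefix_hash, std::make_pair(ok, max_height));
}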
-bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height) const +bool Blockchain::check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height) { - LOG_PRINT_L3("Blockchain::" << __func__); - size_t sig_index = 0; - if(pmax_used_block_height) - *pmax_used_block_height = 0; + LOG_PRINT_L3("Blockchain::" << __func__); + size_t sig_index = 0; + if(pmax_used_block_height) + *pmax_used_block_height = 0; - crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx); + crypto::hash tx_prefix_hash = get_transaction_prefix_hash(tx); - for (const auto& txin : tx.vin) - { - // make sure output being spent is of type txin_to_key, rather than - // e.g. txin_gen, which is only used for miner transactions - CHECK_AND_ASSERT_MES(txin.type() == typeid(txin_to_key), false, "wrong type id in tx input at Blockchain::check_tx_inputs"); - const txin_to_key& in_to_key = boost::get(txin); - - // make sure tx output has key offset(s) (is signed to be used) - CHECK_AND_ASSERT_MES(in_to_key.key_offsets.size(), false, "empty in_to_key.key_offsets in transaction with id " << get_transaction_hash(tx)); - - // basically, make sure number of inputs == number of signatures - CHECK_AND_ASSERT_MES(sig_index < tx.signatures.size(), false, "wrong transaction: not signature entry for input with index= " << sig_index); - - // make sure that output being spent matches up correctly with the - // signature spending it. - if(!check_tx_input(in_to_key, tx_prefix_hash, tx.signatures[sig_index], pmax_used_block_height)) + auto its = m_check_tx_inputs_table.find(tx_prefix_hash); + if (its != m_check_tx_inputs_table.end()) { - LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index); - if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain() - { - LOG_PRINT_L1(" *pmax_used_block_height: " << *pmax_used_block_height); - } - return false; + if (!its->second.first) + return false; + if (pmax_used_block_height) + *pmax_used_block_height = its->second.second; + return true; } - sig_index++; - } + uint64_t t_t1 = 0; + std::vector> pubkeys(tx.vin.size()); + std::vector < uint64_t > results; + results.resize(tx.vin.size(), 0); - return true; + int threads = std::thread::hardware_concurrency(); + + boost::asio::io_service ioservice; + boost::thread_group threadpool; + + std::auto_ptr < boost::asio::io_service::work > work(new boost::asio::io_service::work(ioservice)); + for (int i = 0; i < threads; i++) + { + threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioservice)); + } + +#define KILL_IOSERVICE() \ + if(threads > 1) \ + { \ + work.reset(); \ + threadpool.join_all(); \ + ioservice.stop(); \ + } \ + + for (const auto& txin : tx.vin) + { + // make sure output being spent is of type txin_to_key, rather than + // e.g. 
txin_gen, which is only used for miner transactions + CHECK_AND_ASSERT_MES(txin.type() == typeid(txin_to_key), false, "wrong type id in tx input at Blockchain::check_tx_inputs"); + const txin_to_key& in_to_key = boost::get(txin); + + // make sure tx output has key offset(s) (is signed to be used) + CHECK_AND_ASSERT_MES(in_to_key.key_offsets.size(), false, "empty in_to_key.key_offsets in transaction with id " << get_transaction_hash(tx)); + + // basically, make sure number of inputs == number of signatures + CHECK_AND_ASSERT_MES(sig_index < tx.signatures.size(), false, "wrong transaction: not signature entry for input with index= " << sig_index); + + // make sure that output being spent matches up correctly with the + // signature spending it. + TIME_MEASURE_START(aa); + if (!check_tx_input(in_to_key, tx_prefix_hash, tx.signatures[sig_index], pubkeys[sig_index], pmax_used_block_height)) + { + LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index); + if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain() + { + LOG_PRINT_L1(" *pmax_used_block_height: " << *pmax_used_block_height); + } + + KILL_IOSERVICE(); + return false; + } + + bool result; + if (threads > 1) + { + // ND: Speedup + // 1. Thread ring signature verification if possible. + ioservice.dispatch(boost::bind(&Blockchain::check_ring_signature, this, std::cref(tx_prefix_hash), std::cref(in_to_key.k_image), std::cref(pubkeys[sig_index]), std::cref(tx.signatures[sig_index]), std::ref(results[sig_index]))); + } + else + { + check_ring_signature(tx_prefix_hash, in_to_key.k_image, pubkeys[sig_index], tx.signatures[sig_index], results[sig_index]); + if (!results[sig_index]) + { + LOG_PRINT_L1("Failed to check ring signature for tx " << get_transaction_hash(tx) << " vin key with k_image: " << in_to_key.k_image << " sig_index: " << sig_index); + + if (pmax_used_block_height) // a default value of NULL is used when called from Blockchain::handle_block_to_main_chain() + { + LOG_PRINT_L1("*pmax_used_block_height: " << *pmax_used_block_height); + } + + KILL_IOSERVICE(); + return false; + } + } + + sig_index++; + } + + KILL_IOSERVICE(); + + if (threads > 1) + { + for (size_t i = 0; i < tx.vin.size(); i++) + { + if (!results[i]) + { + LOG_PRINT_L1("Failed to check ring signatures!"); + return false; + } + } + } + LOG_PRINT_L1("t_loop: " << t_t1); + return true; } + +//------------------------------------------------------------------ +void Blockchain::check_ring_signature(const crypto::hash &tx_prefix_hash, const crypto::key_image &key_image, const std::vector &pubkeys, const std::vector& sig, uint64_t &result) +{ + if (m_is_in_checkpoint_zone) + { + result = true; + return; + } + + std::vector p_output_keys; + for (auto &key : pubkeys) + { + p_output_keys.push_back(&key); + } + + result = crypto::check_ring_signature(tx_prefix_hash, key_image, p_output_keys, sig.data()) ? 1 : 0; +} + //------------------------------------------------------------------ // This function checks to see if a tx is unlocked. unlock_time is either // a block index or a unix time. 
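The hunk fans the per-input check_ring_signature calls out to a boost::asio::io_service backed by a boost::thread_group and joins the per-input results afterwards. A minimal sketch of the same fan-out/join idea, using std::async in place of the boost pool; `verify` is a placeholder for the real ring-signature check:

#include <cstddef>
#include <functional>
#include <future>
#include <vector>

// Verify one signature per input concurrently and report whether all passed.
// Each call is independent, so the results can simply be joined and AND-ed.
bool verify_all_inputs(std::size_t input_count, const std::function<bool(std::size_t)>& verify)
{
    std::vector<std::future<bool>> jobs;
    jobs.reserve(input_count);
    for (std::size_t i = 0; i < input_count; ++i)
        jobs.push_back(std::async(std::launch::async, verify, i));

    bool all_ok = true;
    for (auto& job : jobs)
        all_ok = job.get() && all_ok;    // join every job even after a failure
    return all_ok;
}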
bool Blockchain::is_tx_spendtime_unlocked(uint64_t unlock_time) const { - LOG_PRINT_L3("Blockchain::" << __func__); - if(unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) - { - //interpret as block index - if(get_current_blockchain_height() + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_BLOCKS >= unlock_time) - return true; + LOG_PRINT_L3("Blockchain::" << __func__); + if(unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) + { + // ND: Instead of calling get_current_blockchain_height(), call m_db->height() + // directly as get_current_blockchain_height() locks the recursive mutex. + if(m_db->height() + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_BLOCKS >= unlock_time) + return true; + else + return false; + } else - return false; - }else - { - //interpret as time - uint64_t current_time = static_cast(time(NULL)); - if(current_time + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS >= unlock_time) - return true; - else - return false; - } - return false; + { + //interpret as time + uint64_t current_time = static_cast(time(NULL)); + if(current_time + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS >= unlock_time) + return true; + else + return false; + } + return false; } //------------------------------------------------------------------ // This function locates all outputs associated with a given input (mixins) // and validates that they exist and are usable. It also checks the ring // signature for each input. -bool Blockchain::check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector& sig, uint64_t* pmax_related_block_height) const +bool Blockchain::check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector& sig, std::vector &output_keys, uint64_t* pmax_related_block_height) { - LOG_PRINT_L3("Blockchain::" << __func__); - CRITICAL_REGION_LOCAL(m_blockchain_lock); + LOG_PRINT_L3("Blockchain::" << __func__); - struct outputs_visitor - { - std::vector& m_p_output_keys; - std::vector& m_output_keys; - const Blockchain& m_bch; - outputs_visitor(std::vector& output_keys, std::vector& p_output_keys, const Blockchain& bch) : m_output_keys(output_keys), m_p_output_keys(p_output_keys), m_bch(bch) - {} - bool handle_output(const transaction& tx, const tx_out& out) + // ND: + // 1. Disable locking and make method private. 
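The unlock-time rule above treats small values as block heights and large values as unix timestamps. A sketch with the thresholds passed in as parameters, since only the constant names (CRYPTONOTE_MAX_BLOCK_NUMBER and the two allowed-delta constants), not their values, appear in this hunk:

#include <cstdint>
#include <ctime>

// unlock_time below max_block_number is a block height, anything above it is a
// unix timestamp; delta_blocks / delta_seconds are the small grace windows.
bool is_spendtime_unlocked(uint64_t unlock_time, uint64_t chain_height,
                           uint64_t max_block_number,
                           uint64_t delta_blocks, uint64_t delta_seconds)
{
    if (unlock_time < max_block_number)                        // interpret as block height
        return chain_height + delta_blocks >= unlock_time;
    const uint64_t now = static_cast<uint64_t>(std::time(nullptr));
    return now + delta_seconds >= unlock_time;                 // interpret as unix time
}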
+ //CRITICAL_REGION_LOCAL(m_blockchain_lock); + + struct outputs_visitor { - //check tx unlock time - if(!m_bch.is_tx_spendtime_unlocked(tx.unlock_time)) - { - LOG_PRINT_L1("One of outputs for one of inputs has wrong tx.unlock_time = " << tx.unlock_time); - return false; - } + std::vector& m_output_keys; + const Blockchain& m_bch; + outputs_visitor(std::vector& output_keys, const Blockchain& bch) : + m_output_keys(output_keys), m_bch(bch) + { + } + bool handle_output(uint64_t unlock_time, const crypto::public_key &pubkey) + { + //check tx unlock time + if (!m_bch.is_tx_spendtime_unlocked(unlock_time)) + { + LOG_PRINT_L1("One of outputs for one of inputs has wrong tx.unlock_time = " << unlock_time); + return false; + } - if(out.target.type() != typeid(txout_to_key)) - { - LOG_PRINT_L1("Output has wrong type id, which=" << out.target.which()); - return false; - } + m_output_keys.push_back(pubkey); + return true; + } + }; - m_output_keys.push_back(boost::get(out.target).key); - return true; + output_keys.clear(); + + //check ring signature + outputs_visitor vi(output_keys, *this); + if (!scan_outputkeys_for_indexes(txin, vi, tx_prefix_hash, pmax_related_block_height)) + { + LOG_PRINT_L1("Failed to get output keys for tx with amount = " << print_money(txin.amount) << " and count indexes " << txin.key_offsets.size()); + return false; } - }; - //check ring signature - std::vector output_keys; - std::vector p_output_keys; - outputs_visitor vi(output_keys, p_output_keys, *this); - if(!scan_outputkeys_for_indexes(txin, vi, pmax_related_block_height)) - { - LOG_PRINT_L1("Failed to get output keys for tx with amount = " << print_money(txin.amount) << " and count indexes " << txin.key_offsets.size()); - return false; - } - - for (auto& k : output_keys) - { - p_output_keys.push_back(&k); - } - - if(txin.key_offsets.size() != output_keys.size()) - { - LOG_PRINT_L1("Output keys for tx with amount = " << txin.amount << " and count indexes " << txin.key_offsets.size() << " returned wrong keys count " << output_keys.size()); - return false; - } - CHECK_AND_ASSERT_MES(sig.size() == output_keys.size(), false, "internal error: tx signatures count=" << sig.size() << " mismatch with outputs keys count for inputs=" << output_keys.size()); - if(m_is_in_checkpoint_zone) + if(txin.key_offsets.size() != output_keys.size()) + { + LOG_PRINT_L1("Output keys for tx with amount = " << txin.amount << " and count indexes " << txin.key_offsets.size() << " returned wrong keys count " << output_keys.size()); + return false; + } + CHECK_AND_ASSERT_MES(sig.size() == output_keys.size(), false, "internal error: tx signatures count=" << sig.size() << " mismatch with outputs keys count for inputs=" << output_keys.size()); return true; - return crypto::check_ring_signature(tx_prefix_hash, txin.k_image, p_output_keys, sig.data()); } //------------------------------------------------------------------ //TODO: Is this intended to do something else? Need to look into the todo there. 
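After this change, check_tx_input only gathers the ring members' public keys and verifies that the counts line up; the ring signature itself is checked later, possibly on another thread. A reduced sketch, with the output scan abstracted to a callback since scan_outputkeys_for_indexes is not shown in this hunk:

#include <cstddef>
#include <functional>
#include <vector>

struct pubkey_s { unsigned char data[32]; };   // stand-in for crypto::public_key

// Collect one public key per referenced ring member via `scan`, which stands
// in for scan_outputkeys_for_indexes() and returns false if any referenced
// output is missing or still locked. The key count must match both the number
// of key offsets in the input and the number of signatures supplied for it.
bool gather_ring_members(std::size_t key_offset_count, std::size_t signature_count,
                         const std::function<bool(std::vector<pubkey_s>&)>& scan,
                         std::vector<pubkey_s>& output_keys)
{
    output_keys.clear();
    if (!scan(output_keys))
        return false;                                 // failed to resolve the referenced outputs
    if (output_keys.size() != key_offset_count)
        return false;                                 // wrong number of ring members resolved
    return output_keys.size() == signature_count;     // one signature component per ring member
}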
uint64_t Blockchain::get_adjusted_time() const { - LOG_PRINT_L3("Blockchain::" << __func__); - //TODO: add collecting median time - return time(NULL); + LOG_PRINT_L3("Blockchain::" << __func__); + //TODO: add collecting median time + return time(NULL); } //------------------------------------------------------------------ //TODO: revisit, has changed a bit on upstream bool Blockchain::check_block_timestamp(std::vector& timestamps, const block& b) const { - LOG_PRINT_L3("Blockchain::" << __func__); - uint64_t median_ts = epee::misc_utils::median(timestamps); + LOG_PRINT_L3("Blockchain::" << __func__); + uint64_t median_ts = epee::misc_utils::median(timestamps); - if(b.timestamp < median_ts) - { - LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", less than median of last " << BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW << " blocks, " << median_ts); - return false; - } + if(b.timestamp < median_ts) + { + LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", less than median of last " << BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW << " blocks, " << median_ts); + return false; + } - return true; + return true; } //------------------------------------------------------------------ // This function grabs the timestamps from the most recent blocks, @@ -1992,32 +2215,32 @@ bool Blockchain::check_block_timestamp(std::vector& timestamps, const // false otherwise bool Blockchain::check_block_timestamp(const block& b) const { - LOG_PRINT_L3("Blockchain::" << __func__); - if(b.timestamp > get_adjusted_time() + CRYPTONOTE_BLOCK_FUTURE_TIME_LIMIT) - { - LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", bigger than adjusted time + 2 hours"); - return false; - } + LOG_PRINT_L3("Blockchain::" << __func__); + if(b.timestamp > get_adjusted_time() + CRYPTONOTE_BLOCK_FUTURE_TIME_LIMIT) + { + LOG_PRINT_L1("Timestamp of block with id: " << get_block_hash(b) << ", " << b.timestamp << ", bigger than adjusted time + 2 hours"); + return false; + } - // if not enough blocks, no proper median yet, return true - if(m_db->height() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW + 1) - { - return true; - } + // if not enough blocks, no proper median yet, return true + if(m_db->height() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW + 1) + { + return true; + } - std::vector timestamps; - auto h = m_db->height(); + std::vector timestamps; + auto h = m_db->height(); - // need most recent 60 blocks, get index of first of those - // using +1 because BlockchainDB::height() returns the index of the top block, - // not the size of the blockchain (0-indexed) - size_t offset = h - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW - 1; - for(;offset < h; ++offset) - { - timestamps.push_back(m_db->get_block_timestamp(offset)); - } + // need most recent 60 blocks, get index of first of those + // using +1 because BlockchainDB::height() returns the index of the top block, + // not the size of the blockchain (0-indexed) + size_t offset = h - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW - 1; + for(;offset < h; ++offset) + { + timestamps.push_back(m_db->get_block_timestamp(offset)); + } - return check_block_timestamp(timestamps, b); + return check_block_timestamp(timestamps, b); } //------------------------------------------------------------------ // Needs to validate the block and acquire each transaction from the @@ -2025,295 +2248,371 @@ bool Blockchain::check_block_timestamp(const block& b) const // m_db->add_block() bool Blockchain::handle_block_to_main_chain(const block& bl, 
const crypto::hash& id, block_verification_context& bvc) { - LOG_PRINT_L3("Blockchain::" << __func__); + LOG_PRINT_L3("Blockchain::" << __func__); - TIME_MEASURE_START(block_processing_time); - CRITICAL_REGION_LOCAL(m_blockchain_lock); - if(bl.prev_id != get_tail_id()) - { - LOG_PRINT_L1("Block with id: " << id << std::endl - << "has wrong prev_id: " << bl.prev_id << std::endl - << "expected: " << get_tail_id()); - return false; - } + TIME_MEASURE_START(block_processing_time); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + TIME_MEASURE_START(t1); - // make sure block timestamp is not less than the median timestamp - // of a set number of the most recent blocks. - if(!check_block_timestamp(bl)) - { - LOG_PRINT_L1("Block with id: " << id << std::endl - << "has invalid timestamp: " << bl.timestamp); - bvc.m_verifivation_failed = true; - return false; - } - - //check proof of work - TIME_MEASURE_START(target_calculating_time); - - // get the target difficulty for the block. - // the calculation can overflow, among other failure cases, - // so we need to check the return type. - // FIXME: get_difficulty_for_next_block can also assert, look into - // changing this to throwing exceptions instead so we can clean up. - difficulty_type current_diffic = get_difficulty_for_next_block(); - CHECK_AND_ASSERT_MES(current_diffic, false, "!!!!!!!!! difficulty overhead !!!!!!!!!"); - - TIME_MEASURE_FINISH(target_calculating_time); - - TIME_MEASURE_START(longhash_calculating_time); - - crypto::hash proof_of_work = null_hash; - - // Formerly the code below contained an if loop with the following condition - // !m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height()) - // however, this caused the daemon to not bother checking PoW for blocks - // before checkpoints, which is very dangerous behaviour. We moved the PoW - // validation out of the next chunk of code to make sure that we correctly - // check PoW now. - // FIXME: height parameter is not used...should it be used or should it not - // be a parameter? - proof_of_work = get_block_longhash(bl, m_db->height()); - - // validate proof_of_work versus difficulty target - if(!check_hash(proof_of_work, current_diffic)) - { - LOG_PRINT_L1("Block with id: " << id << std::endl - << "does not have enough proof of work: " << proof_of_work << std::endl - << "unexpected difficulty: " << current_diffic ); - bvc.m_verifivation_failed = true; - return false; - } - - // If we're at a checkpoint, ensure that our hardcoded checkpoint hash - // is correct. - if(m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height())) - { - if(!m_checkpoints.check_block(get_current_blockchain_height(), id)) + if(bl.prev_id != get_tail_id()) { - LOG_ERROR("CHECKPOINT VALIDATION FAILED"); - bvc.m_verifivation_failed = true; - return false; - } - } - - TIME_MEASURE_FINISH(longhash_calculating_time); - - // sanity check basic miner tx properties - if(!prevalidate_miner_transaction(bl, m_db->height())) - { - LOG_PRINT_L1("Block with id: " << id - << " failed to pass prevalidation"); - bvc.m_verifivation_failed = true; - return false; - } - - size_t coinbase_blob_size = get_object_blobsize(bl.miner_tx); - size_t cumulative_block_size = coinbase_blob_size; - - std::vector txs; - key_images_container keys; - - uint64_t fee_summary = 0; - uint64_t t_checktx = 0; - - // Iterate over the block's transaction hashes, grabbing each - // from the tx_pool and validating them. Each is then added - // to txs. Keys spent in each are added to by the double spend check. 
- for (const crypto::hash& tx_id : bl.tx_hashes) - { - transaction tx; - size_t blob_size = 0; - uint64_t fee = 0; - - if (m_db->tx_exists(tx_id)) - { - LOG_PRINT_L1("Block with id: " << id << " attempting to add transaction already in blockchain with id: " << tx_id); - bvc.m_verifivation_failed = true; - break; + LOG_PRINT_L1("Block with id: " << id << std::endl << "has wrong prev_id: " << bl.prev_id << std::endl << "expected: " << get_tail_id()); + return false; } - // get transaction with hash from tx_pool - if(!m_tx_pool.take_tx(tx_id, tx, blob_size, fee)) + TIME_MEASURE_FINISH(t1); + TIME_MEASURE_START(t2); + + // make sure block timestamp is not less than the median timestamp + // of a set number of the most recent blocks. + if(!check_block_timestamp(bl)) { - LOG_PRINT_L1("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id); - bvc.m_verifivation_failed = true; - break; + LOG_PRINT_L1("Block with id: " << id << std::endl << "has invalid timestamp: " << bl.timestamp); + bvc.m_verifivation_failed = true; + return false; } - // add the transaction to the temp list of transactions, so we can either - // store the list of transactions all at once or return the ones we've - // taken from the tx_pool back to it if the block fails verification. - txs.push_back(tx); + TIME_MEASURE_FINISH(t2); + //check proof of work + TIME_MEASURE_START(target_calculating_time); - TIME_MEASURE_START(aa); - // validate that transaction inputs and the keys spending them are correct. - if(!check_tx_inputs(tx)) + // get the target difficulty for the block. + // the calculation can overflow, among other failure cases, + // so we need to check the return type. + // FIXME: get_difficulty_for_next_block can also assert, look into + // changing this to throwing exceptions instead so we can clean up. + difficulty_type current_diffic = get_difficulty_for_next_block(); + CHECK_AND_ASSERT_MES(current_diffic, false, "!!!!!!!!! difficulty overhead !!!!!!!!!"); + + TIME_MEASURE_FINISH(target_calculating_time); + + TIME_MEASURE_START(longhash_calculating_time); + + crypto::hash proof_of_work = null_hash; + + // Formerly the code below contained an if loop with the following condition + // !m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height()) + // however, this caused the daemon to not bother checking PoW for blocks + // before checkpoints, which is very dangerous behaviour. We moved the PoW + // validation out of the next chunk of code to make sure that we correctly + // check PoW now. + // FIXME: height parameter is not used...should it be used or should it not + // be a parameter? + // validate proof_of_work versus difficulty target + bool precomputed = false; +#if defined(PER_BLOCK_CHECKPOINT) + bool fast_check = false; + if (m_db->height() < m_blocks_hash_check.size()) { - LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs."); - - //TODO: why is this done? make sure that keeping invalid blocks makes sense. 
- add_block_as_invalid(bl, id); - LOG_PRINT_L1("Block with id " << id << " added as invalid becouse of wrong inputs in transactions"); - bvc.m_verifivation_failed = true; - break; + auto hash = get_block_hash(bl); + if (memcmp(&hash, &m_blocks_hash_check[m_db->height()], sizeof(hash)) != 0) + { + LOG_PRINT_L1("Block with id is INVALID: " << id); + bvc.m_verifivation_failed = true; + return false; + } + fast_check = true; } - TIME_MEASURE_FINISH(aa); - t_checktx += aa; - - if (!check_for_double_spend(tx, keys)) + else +#endif { - LOG_PRINT_L0("Double spend detected in transaction (id: " << tx_id); - bvc.m_verifivation_failed = true; - break; + auto it = m_blocks_longhash_table.find(id); + if (it != m_blocks_longhash_table.end()) + { + precomputed = true; + proof_of_work = it->second; + } + else + proof_of_work = get_block_longhash(bl, m_db->height()); + + // validate proof_of_work versus difficulty target + if(!check_hash(proof_of_work, current_diffic)) + { + LOG_PRINT_L1("Block with id: " << id << std::endl << "does not have enough proof of work: " << proof_of_work << std::endl << "unexpected difficulty: " << current_diffic); + bvc.m_verifivation_failed = true; + return false; + } } - fee_summary += fee; - cumulative_block_size += blob_size; - } - - uint64_t base_reward = 0; - uint64_t already_generated_coins = m_db->height() ? m_db->get_block_already_generated_coins(m_db->height() - 1) : 0; - if(!validate_miner_transaction(bl, cumulative_block_size, fee_summary, base_reward, already_generated_coins)) - { - LOG_PRINT_L1("Block with id: " << id - << " has incorrect miner transaction"); - bvc.m_verifivation_failed = true; - } - - - block_extended_info bei = boost::value_initialized(); - size_t block_size; - difficulty_type cumulative_difficulty; - - // populate various metadata about the block to be stored alongside it. - block_size = cumulative_block_size; - cumulative_difficulty = current_diffic; - already_generated_coins = already_generated_coins + base_reward; - if(m_db->height()) - cumulative_difficulty += m_db->get_block_cumulative_difficulty(m_db->height() - 1); - - update_next_cumulative_size_limit(); - - TIME_MEASURE_FINISH(block_processing_time); - - uint64_t new_height = 0; - bool add_success = true; - if (!bvc.m_verifivation_failed) - { - try + // If we're at a checkpoint, ensure that our hardcoded checkpoint hash + // is correct. + if(m_checkpoints.is_in_checkpoint_zone(get_current_blockchain_height())) { - new_height = m_db->add_block(bl, block_size, cumulative_difficulty, already_generated_coins, txs); + if(!m_checkpoints.check_block(get_current_blockchain_height(), id)) + { + LOG_ERROR("CHECKPOINT VALIDATION FAILED"); + bvc.m_verifivation_failed = true; + return false; + } } - catch (const std::exception& e) + + TIME_MEASURE_FINISH(longhash_calculating_time); + if (precomputed) + longhash_calculating_time += m_fake_pow_calc_time; + + TIME_MEASURE_START(t3); + + // sanity check basic miner tx properties; + if(!prevalidate_miner_transaction(bl, m_db->height())) { - //TODO: figure out the best way to deal with this failure - LOG_ERROR("Error adding block with hash: " << id << " to blockchain, what = " << e.what()); - add_success = false; + LOG_PRINT_L1("Block with id: " << id << " failed to pass prevalidation"); + bvc.m_verifivation_failed = true; + return false; } - } - // if we failed for any reason to verify the block, return taken - // transactions to the tx_pool. 
- if (bvc.m_verifivation_failed || !add_success) - { - // return taken transactions to transaction pool - for (auto& tx : txs) + size_t coinbase_blob_size = get_object_blobsize(bl.miner_tx); + size_t cumulative_block_size = coinbase_blob_size; + + std::vector txs; + key_images_container keys; + + uint64_t fee_summary = 0; + uint64_t t_checktx = 0; + uint64_t t_exists = 0; + uint64_t t_pool = 0; + uint64_t t_dblspnd = 0; + uint64_t t_cc; + TIME_MEASURE_FINISH(t3); + + int tx_index = 0; + // Iterate over the block's transaction hashes, grabbing each + // from the tx_pool and validating them. Each is then added + // to txs. Keys spent in each are added to by the double spend check. + for (const crypto::hash& tx_id : bl.tx_hashes) { - cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc); - if (!m_tx_pool.add_tx(tx, tvc, true)) - { - LOG_PRINT_L0("Failed to return taken transaction with hash: " << get_transaction_hash(tx) << " to tx_pool"); - } + transaction tx; + size_t blob_size = 0; + uint64_t fee = 0; + TIME_MEASURE_START(aa); + + if (m_db->tx_exists(tx_id)) + { + LOG_PRINT_L1("Block with id: " << id << " attempting to add transaction already in blockchain with id: " << tx_id); + bvc.m_verifivation_failed = true; + break; + } + + TIME_MEASURE_FINISH(aa); + t_exists += aa; + TIME_MEASURE_START(bb); + + // get transaction with hash from tx_pool + if(!m_tx_pool.take_tx(tx_id, tx, blob_size, fee)) + { + LOG_PRINT_L1("Block with id: " << id << " has at least one unknown transaction with id: " << tx_id); + bvc.m_verifivation_failed = true; + break; + } + + TIME_MEASURE_FINISH(bb); + t_pool += bb; + // add the transaction to the temp list of transactions, so we can either + // store the list of transactions all at once or return the ones we've + // taken from the tx_pool back to it if the block fails verification. + txs.push_back(tx); + TIME_MEASURE_START(dd); + + // ND: this is not needed, db->add_block() checks for duplicate k_images and fails accordingly. + // if (!check_for_double_spend(tx, keys)) + // { + // LOG_PRINT_L0("Double spend detected in transaction (id: " << tx_id); + // bvc.m_verifivation_failed = true; + // break; + // } + + TIME_MEASURE_FINISH(dd); + t_dblspnd += dd; + TIME_MEASURE_START(cc); + +#if defined(PER_BLOCK_CHECKPOINT) + if (!fast_check) +#endif + { + // validate that transaction inputs and the keys spending them are correct. + if(!check_tx_inputs(tx)) + { + LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs."); + + //TODO: why is this done? make sure that keeping invalid blocks makes sense. + add_block_as_invalid(bl, id); + LOG_PRINT_L1("Block with id " << id << " added as invalid because of wrong inputs in transactions"); + bvc.m_verifivation_failed = true; + break; + } + } +#if defined(PER_BLOCK_CHECKPOINT) + else + { + // ND: if fast_check is enabled for blocks, there is no need to check + // the transaction inputs, but do some sanity checks anyway. + if (memcmp(&m_blocks_txs_check[tx_index++], &tx_id, sizeof(tx_id)) != 0) + { + LOG_PRINT_L1("Block with id: " << id << " has at least one transaction (id: " << tx_id << ") with wrong inputs."); + //TODO: why is this done? make sure that keeping invalid blocks makes sense. 
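+         // add_block_as_invalid() records this block's hash, so a later have_block()
+         // check rejects the same block immediately if a peer relays it again.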
+ add_block_as_invalid(bl, id); + LOG_PRINT_L1("Block with id " << id << " added as invalid because of wrong inputs in transactions"); + bvc.m_verifivation_failed = true; + break; + } + } +#endif + TIME_MEASURE_FINISH(cc); + t_checktx += cc; + fee_summary += fee; + cumulative_block_size += blob_size; } - return false; - } - LOG_PRINT_L1("+++++ BLOCK SUCCESSFULLY ADDED" << std::endl << "id:\t" << id - << std::endl << "PoW:\t" << proof_of_work - << std::endl << "HEIGHT " << new_height << ", difficulty:\t" << current_diffic - << std::endl << "block reward: " << print_money(fee_summary + base_reward) << "(" << print_money(base_reward) << " + " << print_money(fee_summary) - << "), coinbase_blob_size: " << coinbase_blob_size << ", cumulative size: " << cumulative_block_size - << ", " << block_processing_time << "("<< target_calculating_time << "/" << longhash_calculating_time << ")ms"); + m_blocks_txs_check.clear(); - LOG_PRINT_L0("Height: " << new_height << " blob: " << coinbase_blob_size << - " cumm: " << cumulative_block_size << " p/t: " << block_processing_time - << " ("<< target_calculating_time << "/" << longhash_calculating_time << "/" << t_checktx << ")ms"); - bvc.m_added_to_main_chain = true; + TIME_MEASURE_START(vmt); + uint64_t base_reward = 0; + uint64_t already_generated_coins = m_db->height() ? m_db->get_block_already_generated_coins(m_db->height() - 1) : 0; + if(!validate_miner_transaction(bl, cumulative_block_size, fee_summary, base_reward, already_generated_coins)) + { + LOG_PRINT_L1("Block with id: " << id << " has incorrect miner transaction"); + bvc.m_verifivation_failed = true; + } - // appears to be a NOP *and* is called elsewhere. wat? - m_tx_pool.on_blockchain_inc(new_height, id); + TIME_MEASURE_FINISH(vmt); + block_extended_info bei = boost::value_initialized(); + size_t block_size; + difficulty_type cumulative_difficulty; - return true; + // populate various metadata about the block to be stored alongside it. + block_size = cumulative_block_size; + cumulative_difficulty = current_diffic; + already_generated_coins = already_generated_coins + base_reward; + if(m_db->height()) + cumulative_difficulty += m_db->get_block_cumulative_difficulty(m_db->height() - 1); + + update_next_cumulative_size_limit(); + + TIME_MEASURE_FINISH(block_processing_time); + if(precomputed) + block_processing_time += m_fake_pow_calc_time; + + TIME_MEASURE_START(addblock); + uint64_t new_height = 0; + bool add_success = true; + if (!bvc.m_verifivation_failed) + { + try + { + new_height = m_db->add_block(bl, block_size, cumulative_difficulty, already_generated_coins, txs); + } + catch (const std::exception& e) + { + //TODO: figure out the best way to deal with this failure + LOG_ERROR("Error adding block with hash: " << id << " to blockchain, what = " << e.what()); + add_success = false; + } + } + + // if we failed for any reason to verify the block, return taken + // transactions to the tx_pool. 
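+   // take_tx() above already removed these transactions from the pool, so on
+   // failure they are pushed back; the 'true' argument marks them kept-by-block.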
+ if (bvc.m_verifivation_failed || !add_success) + { + // return taken transactions to transaction pool + for (auto& tx : txs) + { + cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc); + if (!m_tx_pool.add_tx(tx, tvc, true)) + { + LOG_PRINT_L0("Failed to return taken transaction with hash: " << get_transaction_hash(tx) << " to tx_pool"); + } + } + return false; + } + + TIME_MEASURE_FINISH(addblock); + + LOG_PRINT_L1("+++++ BLOCK SUCCESSFULLY ADDED" << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "HEIGHT " << new_height << ", difficulty:\t" << current_diffic << std::endl << "block reward: " << print_money(fee_summary + base_reward) << "(" << print_money(base_reward) << " + " << print_money(fee_summary) << "), coinbase_blob_size: " << coinbase_blob_size << ", cumulative size: " << cumulative_block_size << ", " << block_processing_time << "(" << target_calculating_time << "/" << longhash_calculating_time << ")ms"); + if(m_show_time_stats) + { + LOG_PRINT_L0("Height: " << new_height << " blob: " << coinbase_blob_size << " cumm: " + << cumulative_block_size << " p/t: " << block_processing_time << " (" + << target_calculating_time << "/" << longhash_calculating_time << "/" + << t1 << "/" << t2 << "/" << t3 << "/" << t_exists << "/" << t_pool + << "/" << t_checktx << "/" << t_dblspnd << "/" << vmt << "/" << addblock << ")ms"); + } + + bvc.m_added_to_main_chain = true; + ++m_sync_counter; + + // appears to be a NOP *and* is called elsewhere. wat? + m_tx_pool.on_blockchain_inc(new_height, id); + + return true; } //------------------------------------------------------------------ bool Blockchain::update_next_cumulative_size_limit() { - LOG_PRINT_L3("Blockchain::" << __func__); - std::vector sz; - get_last_n_blocks_sizes(sz, CRYPTONOTE_REWARD_BLOCKS_WINDOW); + LOG_PRINT_L3("Blockchain::" << __func__); + std::vector sz; + get_last_n_blocks_sizes(sz, CRYPTONOTE_REWARD_BLOCKS_WINDOW); - uint64_t median = epee::misc_utils::median(sz); - if(median <= CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE) - median = CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE; + uint64_t median = epee::misc_utils::median(sz); + if(median <= CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE) + median = CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE; - m_current_block_cumul_sz_limit = median*2; - return true; + m_current_block_cumul_sz_limit = median*2; + return true; } //------------------------------------------------------------------ bool Blockchain::add_new_block(const block& bl_, block_verification_context& bvc) { - LOG_PRINT_L3("Blockchain::" << __func__); - //copy block here to let modify block.target - block bl = bl_; - crypto::hash id = get_block_hash(bl); - CRITICAL_REGION_LOCAL(m_tx_pool);//to avoid deadlock lets lock tx_pool for whole add/reorganize process - CRITICAL_REGION_LOCAL1(m_blockchain_lock); - if(have_block(id)) - { - LOG_PRINT_L3("block with id = " << id << " already exists"); - bvc.m_already_exists = true; - return false; - } + LOG_PRINT_L3("Blockchain::" << __func__); + //copy block here to let modify block.target + block bl = bl_; + crypto::hash id = get_block_hash(bl); + CRITICAL_REGION_LOCAL(m_tx_pool);//to avoid deadlock lets lock tx_pool for whole add/reorganize process + CRITICAL_REGION_LOCAL1(m_blockchain_lock); + if(have_block(id)) + { + LOG_PRINT_L3("block with id = " << id << " already exists"); + bvc.m_already_exists = true; + return false; + } - //check that block refers to chain tail - if(!(bl.prev_id == get_tail_id())) - { - //chain switching or wrong block - 
bvc.m_added_to_main_chain = false; - return handle_alternative_block(bl, id, bvc); - //never relay alternative blocks - } + //check that block refers to chain tail + if(!(bl.prev_id == get_tail_id())) + { + //chain switching or wrong block + bvc.m_added_to_main_chain = false; + return handle_alternative_block(bl, id, bvc); + //never relay alternative blocks + } - return handle_block_to_main_chain(bl, id, bvc); + return handle_block_to_main_chain(bl, id, bvc); } //------------------------------------------------------------------ void Blockchain::check_against_checkpoints(const checkpoints& points, bool enforce) { - const auto& pts = points.get_points(); + const auto& pts = points.get_points(); - for (const auto& pt : pts) - { - // if the checkpoint is for a block we don't have yet, move on - if (pt.first >= m_db->height()) + for (const auto& pt : pts) { - continue; - } + // if the checkpoint is for a block we don't have yet, move on + if (pt.first >= m_db->height()) + { + continue; + } - if (!points.check_block(pt.first, m_db->get_block_hash_from_height(pt.first))) - { - // if asked to enforce checkpoints, roll back to a couple of blocks before the checkpoint - if (enforce) - { - LOG_ERROR("Local blockchain failed to pass a checkpoint, rolling back!"); - std::list empty; - rollback_blockchain_switching(empty, pt.first - 2); - } - else - { - LOG_ERROR("WARNING: local blockchain failed to pass a MoneroPulse checkpoint, and you could be on a fork. You should either sync up from scratch, OR download a fresh blockchain bootstrap, OR enable checkpoint enforcing with the --enforce-dns-checkpointing command-line option"); - } + if (!points.check_block(pt.first, m_db->get_block_hash_from_height(pt.first))) + { + // if asked to enforce checkpoints, roll back to a couple of blocks before the checkpoint + if (enforce) + { + LOG_ERROR("Local blockchain failed to pass a checkpoint, rolling back!"); + std::list empty; + rollback_blockchain_switching(empty, pt.first - 2); + } + else + { + LOG_ERROR("WARNING: local blockchain failed to pass a MoneroPulse checkpoint, and you could be on a fork. You should either sync up from scratch, OR download a fresh blockchain bootstrap, OR enable checkpoint enforcing with the --enforce-dns-checkpointing command-line option"); + } + } } - } } //------------------------------------------------------------------ // returns false if any of the checkpoints loading returns false. @@ -2321,40 +2620,441 @@ void Blockchain::check_against_checkpoints(const checkpoints& points, bool enfor // with an existing checkpoint. bool Blockchain::update_checkpoints(const std::string& file_path, bool check_dns) { - if (!cryptonote::load_checkpoints_from_json(m_checkpoints, file_path)) - { - return false; - } - - // if we're checking both dns and json, load checkpoints from dns. 
- // if we're not hard-enforcing dns checkpoints, handle accordingly - if (m_enforce_dns_checkpoints && check_dns) - { - if (!cryptonote::load_checkpoints_from_dns(m_checkpoints)) + if (!cryptonote::load_checkpoints_from_json(m_checkpoints, file_path)) { - return false; + return false; } - } - else if (check_dns) - { - checkpoints dns_points; - cryptonote::load_checkpoints_from_dns(dns_points); - if (m_checkpoints.check_for_conflicts(dns_points)) - { - check_against_checkpoints(dns_points, false); - } - else - { - LOG_PRINT_L0("One or more checkpoints fetched from DNS conflicted with existing checkpoints!"); - } - } - check_against_checkpoints(m_checkpoints, true); + // if we're checking both dns and json, load checkpoints from dns. + // if we're not hard-enforcing dns checkpoints, handle accordingly + if (m_enforce_dns_checkpoints && check_dns) + { + if (!cryptonote::load_checkpoints_from_dns(m_checkpoints)) + { + return false; + } + } + else if (check_dns) + { + checkpoints dns_points; + cryptonote::load_checkpoints_from_dns(dns_points); + if (m_checkpoints.check_for_conflicts(dns_points)) + { + check_against_checkpoints(dns_points, false); + } + else + { + LOG_PRINT_L0("One or more checkpoints fetched from DNS conflicted with existing checkpoints!"); + } + } - return true; + check_against_checkpoints(m_checkpoints, true); + + return true; } //------------------------------------------------------------------ void Blockchain::set_enforce_dns_checkpoints(bool enforce_checkpoints) { - m_enforce_dns_checkpoints = enforce_checkpoints; + m_enforce_dns_checkpoints = enforce_checkpoints; +} + +//------------------------------------------------------------------ +void Blockchain::block_longhash_worker(const uint64_t height, const std::vector &blocks, std::unordered_map &map) const +{ + TIME_MEASURE_START(t); + slow_hash_allocate_state(); + + for (const auto & block : blocks) + { + crypto::hash id = get_block_hash(block); + crypto::hash pow = get_block_longhash(block, height); + map.emplace(id, pow); + } + + slow_hash_free_state(); + TIME_MEASURE_FINISH(t); +} + +//------------------------------------------------------------------ +bool Blockchain::cleanup_handle_incoming_blocks(bool force_sync) +{ + LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + TIME_MEASURE_START(t1); + auto height = m_db->height(); + + if (m_sync_counter > 0) + { + if (force_sync) + { + if(m_db_sync_mode != db_nosync) + store_blockchain(); + m_sync_counter = 0; + } + else if (m_sync_counter >= m_db_blocks_per_sync) + { + if(m_db_sync_mode == db_async) + { + m_sync_counter = 0; + m_async_service.dispatch(boost::bind(&Blockchain::store_blockchain, this)); + } + else if(m_db_sync_mode == db_sync) + { + store_blockchain(); + } + else // db_nosync + { + // DO NOTHING, not required to call sync. + } + } + } + + TIME_MEASURE_FINISH(t1); + m_blocks_longhash_table.clear(); + m_scan_table.clear(); + m_check_tx_inputs_table.clear(); + m_blocks_txs_check.clear(); + + return true; +} + +//------------------------------------------------------------------ +void Blockchain::output_scan_worker(const uint64_t amount, const std::vector &offsets, std::vector &outputs, std::unordered_map &txs) const +{ + try + { + m_db->get_output_key(amount, offsets, outputs); + } + catch (const std::exception& e) + { + LOG_PRINT_L1("EXCEPTION: " << e.what()); + } + catch (...) + { + + } +} + +//------------------------------------------------------------------ +// ND: Speedups: +// 1. 
Thread long_hash computations if possible (m_max_prepare_blocks_threads = nthreads, default = 4) +// 2. Group all amounts (from txs) and related absolute offsets and form a table of tx_prefix_hash +// vs [k_image, output_keys] (m_scan_table). This is faster because it takes advantage of bulk queries +// and is threaded if possible. The table (m_scan_table) will be used later when querying output +// keys. +bool Blockchain::prepare_handle_incoming_blocks(const std::list &blocks_entry) +{ + LOG_PRINT_YELLOW("Blockchain::" << __func__, LOG_LEVEL_3); + TIME_MEASURE_START(prepare); + CRITICAL_REGION_LOCAL(m_blockchain_lock); + + if(blocks_entry.size() == 0) + return false; + + if ((m_db->height() + blocks_entry.size()) < m_blocks_hash_check.size()) + return true; + + bool blocks_exist = false; + uint64_t threads = std::thread::hardware_concurrency(); + + if (blocks_entry.size() > 1 && threads > 1 && m_max_prepare_blocks_threads > 1) + { + // limit threads, default limit = 4 + if(threads > m_max_prepare_blocks_threads) + threads = m_max_prepare_blocks_threads; + + uint64_t height = m_db->height(); + std::vector thread_list; + int batches = blocks_entry.size() / threads; + int extra = blocks_entry.size() % threads; + LOG_PRINT_L1("block_batches: " << batches); + std::vector> maps(threads); + std::vector < std::vector < block >> blocks(threads); + auto it = blocks_entry.begin(); + + for (uint64_t i = 0; i < threads; i++) + { + for (int j = 0; j < batches; j++) + { + block block; + + if (!parse_and_validate_block_from_blob(it->block, block)) + { + std::advance(it, 1); + continue; + } + + // check first block and skip all blocks if its not chained properly + if (i == 0 && j == 0) + { + crypto::hash tophash = m_db->top_block_hash(); + if (block.prev_id != tophash) + { + LOG_PRINT_L0("Skipping prepare blocks. New blocks don't belong to chain.") + return true; + } + } + if (have_block(get_block_hash(block))) + { + blocks_exist = true; + break; + } + + blocks[i].push_back(block); + std::advance(it, 1); + } + } + + for (int i = 0; i < extra && !blocks_exist; i++) + { + block block; + + if (!parse_and_validate_block_from_blob(it->block, block)) + { + std::advance(it, 1); + continue; + } + + if (have_block(get_block_hash(block))) + { + blocks_exist = true; + break; + } + + blocks[i].push_back(block); + std::advance(it, 1); + } + + if (!blocks_exist) + { + m_blocks_longhash_table.clear(); + for (uint64_t i = 0; i < threads; i++) + { + thread_list.push_back(new boost::thread(&Blockchain::block_longhash_worker, this, height + (i * batches), std::cref(blocks[i]), std::ref(maps[i]))); + } + + for (size_t j = 0; j < thread_list.size(); j++) + { + thread_list[j]->join(); + delete thread_list[j]; + } + + thread_list.clear(); + + for (const auto & map : maps) + { + m_blocks_longhash_table.insert(map.begin(), map.end()); + } + } + } + + if (blocks_exist) + { + LOG_PRINT_L0("Skipping prepare blocks. 
Blocks exist.") + return true; + } + + m_fake_scan_time = 0; + m_fake_pow_calc_time = 0; + + m_scan_table.clear(); + m_check_tx_inputs_table.clear(); + + TIME_MEASURE_FINISH(prepare); + m_fake_pow_calc_time = prepare / blocks_entry.size(); + + if (blocks_entry.size() > 1 && threads > 1 && m_show_time_stats) + LOG_PRINT_L0("Prepare blocks took: " << prepare << " ms"); + + TIME_MEASURE_START(scantable); + + // [input] stores all unique amounts found + std::vector < uint64_t > amounts; + // [input] stores all absolute_offsets for each amount + std::map> offset_map; + // [output] stores all output_data_t for each absolute_offset + std::map> tx_map; + +#define SCAN_TABLE_QUIT(m) \ + do { \ + LOG_PRINT_L0(m) ;\ + m_scan_table.clear(); \ + return false; \ + } while(0); \ + + // generate sorted tables for all amounts and absolute offsets + for (const auto &entry : blocks_entry) + { + for (const auto &tx_blob : entry.txs) + { + crypto::hash tx_hash = null_hash; + crypto::hash tx_prefix_hash = null_hash; + transaction tx; + + if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash, tx_prefix_hash)) + SCAN_TABLE_QUIT("Could not parse tx from incoming blocks."); + + auto its = m_scan_table.find(tx_prefix_hash); + if (its != m_scan_table.end()) + SCAN_TABLE_QUIT("Duplicate tx found from incoming blocks."); + + m_scan_table.emplace(tx_prefix_hash, std::unordered_map>()); + its = m_scan_table.find(tx_prefix_hash); + assert(its != m_scan_table.end()); + + // get all amounts from tx.vin(s) + for (const auto &txin : tx.vin) + { + const txin_to_key &in_to_key = boost::get < txin_to_key > (txin); + + // check for duplicate + auto it = its->second.find(in_to_key.k_image); + if (it != its->second.end()) + SCAN_TABLE_QUIT("Duplicate key_image found from incoming blocks."); + + amounts.push_back(in_to_key.amount); + } + + // sort and remove duplicate amounts from amounts list + std::sort(amounts.begin(), amounts.end()); + auto last = std::unique(amounts.begin(), amounts.end()); + amounts.erase(last, amounts.end()); + + // add amount to the offset_map and tx_map + for (const uint64_t &amount : amounts) + { + if (offset_map.find(amount) == offset_map.end()) + offset_map.emplace(amount, std::vector()); + + if (tx_map.find(amount) == tx_map.end()) + tx_map.emplace(amount, std::vector()); + } + + // add new absolute_offsets to offset_map + for (const auto &txin : tx.vin) + { + const txin_to_key &in_to_key = boost::get < txin_to_key > (txin); + // no need to check for duplicate here. 
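+         // key_offsets are stored as deltas (each entry is relative to the previous
+         // one); converting them to absolute per-amount indices here lets the bulk
+         // output lookups below query the database directly.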
+ auto absolute_offsets = relative_output_offsets_to_absolute(in_to_key.key_offsets); + for (const auto & offset : absolute_offsets) + offset_map[in_to_key.amount].push_back(offset); + + } + + // sort and remove duplicate absolute_offsets in offset_map + for (auto &offsets : offset_map) + { + std::sort(offsets.second.begin(), offsets.second.end()); + auto last = std::unique(offsets.second.begin(), offsets.second.end()); + offsets.second.erase(last, offsets.second.end()); + } + } + } + + // [output] stores all transactions for each tx_out_index::hash found + std::vector> transactions(amounts.size()); + + threads = std::thread::hardware_concurrency(); + if (!m_db->can_thread_bulk_indices()) + threads = 1; + + if (threads > 1) + { + boost::asio::io_service ioservice; + boost::thread_group threadpool; + std::unique_ptr < boost::asio::io_service::work > work(new boost::asio::io_service::work(ioservice)); + + for (uint64_t i = 0; i < threads; i++) + { + threadpool.create_thread(boost::bind(&boost::asio::io_service::run, &ioservice)); + } + + for (size_t i = 0; i < amounts.size(); i++) + { + uint64_t amount = amounts[i]; + ioservice.dispatch(boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount]), std::ref(transactions[i]))); + } + + work.reset(); + threadpool.join_all(); + ioservice.stop(); + } + else + { + for (size_t i = 0; i < amounts.size(); i++) + { + uint64_t amount = amounts[i]; + output_scan_worker(amount, offset_map[amount], tx_map[amount], transactions[i]); + } + } + + int total_txs = 0; + + // now generate a table for each tx_prefix and k_image hashes + for (const auto &entry : blocks_entry) + { + for (const auto &tx_blob : entry.txs) + { + crypto::hash tx_hash = null_hash; + crypto::hash tx_prefix_hash = null_hash; + transaction tx; + + if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash, tx_prefix_hash)) + SCAN_TABLE_QUIT("Could not parse tx from incoming blocks."); + + ++total_txs; + auto its = m_scan_table.find(tx_prefix_hash); + if (its == m_scan_table.end()) + SCAN_TABLE_QUIT("Tx not found on scan table from incoming blocks."); + + for (const auto &txin : tx.vin) + { + const txin_to_key &in_to_key = boost::get < txin_to_key > (txin); + auto needed_offsets = relative_output_offsets_to_absolute(in_to_key.key_offsets); + + std::vector outputs; + for (const uint64_t & offset_needed : needed_offsets) + { + size_t pos = 0; + bool found = false; + + for (const uint64_t &offset_found : offset_map[in_to_key.amount]) + { + if (offset_needed == offset_found) + { + found = true; + break; + } + + ++pos; + } + + if (found && pos < tx_map[in_to_key.amount].size()) + outputs.push_back(tx_map[in_to_key.amount].at(pos)); + else + break; + } + + its->second.emplace(in_to_key.k_image, outputs); + } + } + } + + TIME_MEASURE_FINISH(scantable); + if (total_txs > 0) + { + m_fake_scan_time = scantable / total_txs; + if(m_show_time_stats) + LOG_PRINT_L0("Prepare scantable took: " << scantable << " ms"); + } + + return true; +} + +void Blockchain::set_user_options(uint64_t maxthreads, uint64_t blocks_per_sync, blockchain_db_sync_mode sync_mode, bool fast_sync) +{ + m_db_sync_mode = sync_mode; + m_fast_sync = fast_sync; + m_db_blocks_per_sync = blocks_per_sync; + m_max_prepare_blocks_threads = maxthreads; } diff --git a/src/cryptonote_core/blockchain.h b/src/cryptonote_core/blockchain.h index 869b0b4b..c3d01c65 100644 --- a/src/cryptonote_core/blockchain.h +++ b/src/cryptonote_core/blockchain.h @@ -56,6 +56,13 @@ namespace cryptonote { class 
tx_memory_pool; + enum blockchain_db_sync_mode + { + db_sync, + db_async, + db_nosync + }; + /************************************************************************/ /* */ /************************************************************************/ @@ -94,6 +101,8 @@ namespace cryptonote crypto::hash get_block_id_by_height(uint64_t height) const; bool get_block_by_hash(const crypto::hash &h, block &blk) const; void get_all_known_block_ids(std::list &main, std::list &alt, std::list &invalid) const; + bool prepare_handle_incoming_blocks(const std::list &blocks); + bool cleanup_handle_incoming_blocks(bool force_sync = false); template void serialize(archive_t & ar, const unsigned int version); @@ -102,16 +111,13 @@ namespace cryptonote bool have_tx_keyimges_as_spent(const transaction &tx) const; bool have_tx_keyimg_as_spent(const crypto::key_image &key_im) const; - template - bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t& vis, uint64_t* pmax_related_block_height = NULL) const; - uint64_t get_current_blockchain_height() const; crypto::hash get_tail_id() const; crypto::hash get_tail_id(uint64_t& height) const; - difficulty_type get_difficulty_for_next_block() const; + difficulty_type get_difficulty_for_next_block(); bool add_new_block(const block& bl_, block_verification_context& bvc); bool reset_and_set_genesis_block(const block& b); - bool create_block_template(block& b, const account_public_address& miner_address, difficulty_type& di, uint64_t& height, const blobdata& ex_nonce) const; + bool create_block_template(block& b, const account_public_address& miner_address, difficulty_type& di, uint64_t& height, const blobdata& ex_nonce); bool have_block(const crypto::hash& id) const; size_t get_total_transactions() const; bool get_short_chain_history(std::list& ids) const; @@ -123,9 +129,8 @@ namespace cryptonote bool get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request& req, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response& res) const; bool get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector& indexs) const; bool store_blockchain(); - bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector& sig, uint64_t* pmax_related_block_height = NULL) const; - bool check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height = NULL) const; - bool check_tx_inputs(const transaction& tx, uint64_t& pmax_used_block_height, crypto::hash& max_used_block_id) const; + + bool check_tx_inputs(const transaction& tx, uint64_t& pmax_used_block_height, crypto::hash& max_used_block_id, bool kept_by_block = false); uint64_t get_current_cumulative_blocksize_limit() const; bool is_storing_blockchain()const{return m_is_blockchain_storing;} uint64_t block_difficulty(uint64_t i) const; @@ -145,11 +150,23 @@ namespace cryptonote void set_enforce_dns_checkpoints(bool enforce); bool update_checkpoints(const std::string& file_path, bool check_dns); + // user options, must be called before calling init() + void set_user_options(uint64_t block_threads, uint64_t blocks_per_sync, + blockchain_db_sync_mode sync_mode, bool fast_sync); + + void set_show_time_stats(bool stats) { m_show_time_stats = stats; } + BlockchainDB& get_db() { return *m_db; } + void output_scan_worker(const uint64_t amount,const std::vector &offsets, + std::vector &outputs, std::unordered_map &txs) const; + + void block_longhash_worker(const uint64_t height, const std::vector &blocks, + std::unordered_map &map) const; private: typedef 
std::unordered_map blocks_by_id_index; typedef std::unordered_map transactions_container; @@ -171,6 +188,29 @@ namespace cryptonote key_images_container m_spent_keys; size_t m_current_block_cumul_sz_limit; + std::unordered_map>> m_scan_table; + std::unordered_map> m_check_tx_inputs_table; + std::unordered_map m_blocks_longhash_table; + + // SHA-3 hashes for each block and for fast pow checking + std::vector m_blocks_hash_check; + std::vector m_blocks_txs_check; + + blockchain_db_sync_mode m_db_sync_mode; + bool m_fast_sync; + bool m_show_time_stats; + uint64_t m_db_blocks_per_sync; + uint64_t m_max_prepare_blocks_threads; + uint64_t m_fake_pow_calc_time; + uint64_t m_fake_scan_time; + uint64_t m_sync_counter; + std::vector m_timestamps; + std::vector m_difficulties; + uint64_t m_timestamps_and_difficulties_height; + + boost::asio::io_service m_async_service; + boost::thread_group m_async_pool; + std::unique_ptr m_async_work_idle; // all alternative chains blocks_ext_by_hash m_alternative_chains; // crypto::hash -> block_extended_info @@ -185,6 +225,11 @@ namespace cryptonote std::atomic m_is_blockchain_storing; bool m_enforce_dns_checkpoints; + template + inline bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t &vis, const crypto::hash &tx_prefix_hash, uint64_t* pmax_related_block_height = NULL) const; + bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector& sig, std::vector &output_keys, uint64_t* pmax_related_block_height); + bool check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height = NULL); + bool switch_to_alternative_blockchain(std::list& alt_chain, bool discard_disconnected_chain); block pop_block_from_blockchain(); bool purge_transaction_from_blockchain(const crypto::hash& tx_id); @@ -213,6 +258,9 @@ namespace cryptonote bool update_next_cumulative_size_limit(); bool check_for_double_spend(const transaction& tx, key_images_container& keys_this_block) const; + void get_timestamp_and_difficulty(uint64_t ×tamp, difficulty_type &difficulty, const int offset) const; + void check_ring_signature(const crypto::hash &tx_prefix_hash, const crypto::key_image &key_image, + const std::vector &pubkeys, const std::vector &sig, uint64_t &result); }; diff --git a/src/cryptonote_core/blockchain_storage.h b/src/cryptonote_core/blockchain_storage.h index 2c2fb525..aeeef31b 100644 --- a/src/cryptonote_core/blockchain_storage.h +++ b/src/cryptonote_core/blockchain_storage.h @@ -104,9 +104,6 @@ namespace cryptonote bool have_tx_keyimg_as_spent(const crypto::key_image &key_im) const; const transaction *get_tx(const crypto::hash &id) const; - template - bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t& vis, uint64_t* pmax_related_block_height = NULL) const; - uint64_t get_current_blockchain_height() const; crypto::hash get_tail_id() const; crypto::hash get_tail_id(uint64_t& height) const; @@ -127,9 +124,7 @@ namespace cryptonote bool get_backward_blocks_sizes(size_t from_height, std::vector& sz, size_t count) const; bool get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector& indexs) const; bool store_blockchain(); - bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector& sig, uint64_t* pmax_related_block_height = NULL) const; - bool check_tx_inputs(const transaction& tx, const crypto::hash& tx_prefix_hash, uint64_t* pmax_used_block_height = NULL) const; - bool check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height = NULL) 
const; + bool check_tx_inputs(const transaction& tx, uint64_t& pmax_used_block_height, crypto::hash& max_used_block_id) const; uint64_t get_current_cumulative_blocksize_limit() const; bool is_storing_blockchain()const{return m_is_blockchain_storing;} @@ -229,6 +224,13 @@ namespace cryptonote bool m_enforce_dns_checkpoints; bool m_testnet; + // made private for consistency with blockchain.h + template + bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t& vis, uint64_t* pmax_related_block_height = NULL) const; + bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector& sig, uint64_t* pmax_related_block_height = NULL) const; + bool check_tx_inputs(const transaction& tx, const crypto::hash& tx_prefix_hash, uint64_t* pmax_used_block_height = NULL) const; + bool check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height = NULL) const; + bool switch_to_alternative_blockchain(std::list& alt_chain, bool discard_disconnected_chain); bool pop_block_from_blockchain(); bool purge_block_data_from_blockchain(const block& b, size_t processed_tx_count); diff --git a/src/cryptonote_core/cryptonote_basic_impl.cpp b/src/cryptonote_core/cryptonote_basic_impl.cpp index abdef64f..1319a2ef 100644 --- a/src/cryptonote_core/cryptonote_basic_impl.cpp +++ b/src/cryptonote_core/cryptonote_basic_impl.cpp @@ -99,7 +99,11 @@ namespace cryptonote { assert(current_block_size < std::numeric_limits::max()); uint64_t product_hi; - uint64_t product_lo = mul128(base_reward, current_block_size * (2 * median_size - current_block_size), &product_hi); + // BUGFIX: 32-bit saturation bug (e.g. ARM7), the result was being + // treated as 32-bit by default. + uint64_t multiplicand = 2 * median_size - current_block_size; + multiplicand *= current_block_size; + uint64_t product_lo = mul128(base_reward, multiplicand, &product_hi); uint64_t reward_hi; uint64_t reward_lo; diff --git a/src/cryptonote_core/cryptonote_core.cpp b/src/cryptonote_core/cryptonote_core.cpp index 2037a8c2..7b96988e 100644 --- a/src/cryptonote_core/cryptonote_core.cpp +++ b/src/cryptonote_core/cryptonote_core.cpp @@ -46,7 +46,7 @@ using namespace epee; #include "cryptonote_core/checkpoints_create.h" #include "blockchain_db/blockchain_db.h" #include "blockchain_db/lmdb/db_lmdb.h" -#ifndef STATICLIB +#if defined(BERKELEY_DB) #include "blockchain_db/berkeleydb/db_bdb.h" #endif @@ -212,18 +212,27 @@ namespace cryptonote #if BLOCKCHAIN_DB == DB_LMDB std::string db_type = command_line::get_arg(vm, daemon_args::arg_db_type); + std::string db_sync_mode = command_line::get_arg(vm, daemon_args::arg_db_sync_mode); + bool fast_sync = command_line::get_arg(vm, daemon_args::arg_fast_block_sync) != 0; + uint64_t blocks_threads = command_line::get_arg(vm, daemon_args::arg_prep_blocks_threads); BlockchainDB* db = nullptr; + uint64_t BDB_FAST_MODE = 0; + uint64_t BDB_FASTEST_MODE = 0; + uint64_t BDB_SAFE_MODE = 0; if (db_type == "lmdb") { db = new BlockchainLMDB(); } else if (db_type == "berkeley") { -#ifndef STATICLIB +#if defined(BERKELEY_DB) db = new BlockchainBDB(); + BDB_FAST_MODE = DB_TXN_WRITE_NOSYNC; + BDB_FASTEST_MODE = DB_TXN_NOSYNC; + BDB_SAFE_MODE = DB_TXN_SYNC; #else - LOG_ERROR("BlockchainBDB not supported on STATIC builds"); + LOG_ERROR("BerkeleyDB support disabled."); return false; #endif } @@ -240,9 +249,71 @@ namespace cryptonote LOG_PRINT_L0("Loading blockchain from folder " << folder.string() << " ..."); const std::string filename = folder.string(); + // temporarily default to 
fastest:async:1000 + blockchain_db_sync_mode sync_mode = db_async; + uint64_t blocks_per_sync = 1000; + try { - db->open(filename); + uint64_t db_flags = 0; + bool islmdb = db_type == "lmdb"; + + std::vector options; + boost::trim(db_sync_mode); + boost::split(options, db_sync_mode, boost::is_any_of(" :")); + + for(const auto &option : options) + LOG_PRINT_L0("option: " << option); + + // temporarily default to fastest:async:1000 + uint64_t DEFAULT_FLAGS = islmdb ? MDB_WRITEMAP | MDB_MAPASYNC | MDB_NORDAHEAD | MDB_NOMETASYNC | MDB_NOSYNC : + BDB_FASTEST_MODE; + + if(options.size() == 0) + { + // temporarily default to fastest:async:1000 + db_flags = DEFAULT_FLAGS; + } + + bool safemode = false; + if(options.size() >= 1) + { + if(options[0] == "safe") + { + safemode = true; + db_flags = islmdb ? MDB_NORDAHEAD : BDB_SAFE_MODE; + sync_mode = db_nosync; + } + else if(options[0] == "fast") + db_flags = islmdb ? MDB_NOMETASYNC | MDB_NOSYNC | MDB_NORDAHEAD : BDB_FAST_MODE; + else if(options[0] == "fastest") + db_flags = islmdb ? MDB_WRITEMAP | MDB_MAPASYNC | MDB_NORDAHEAD | MDB_NOMETASYNC | MDB_NOSYNC : BDB_FASTEST_MODE; + else + db_flags = DEFAULT_FLAGS; + } + + if(options.size() >= 2 && !safemode) + { + if(options[1] == "sync") + sync_mode = db_sync; + else if(options[1] == "async") + sync_mode = db_async; + } + + if(options.size() >= 3 && !safemode) + { + blocks_per_sync = atoll(options[2].c_str()); + if(blocks_per_sync > 5000) + blocks_per_sync = 5000; + if(blocks_per_sync == 0) + blocks_per_sync = 1; + } + + bool auto_remove_logs = command_line::get_arg(vm, daemon_args::arg_db_auto_remove_logs) != 0; + db->set_auto_remove_logs(auto_remove_logs); + db->open(filename, db_flags); + if(!db->m_open) + return false; } catch (const DB_ERROR& e) { @@ -250,7 +321,13 @@ namespace cryptonote return false; } + m_blockchain_storage.set_user_options(blocks_threads, + blocks_per_sync, sync_mode, fast_sync); + r = m_blockchain_storage.init(db, m_testnet); + + bool show_time_stats = command_line::get_arg(vm, daemon_args::arg_show_time_stats) != 0; + m_blockchain_storage.set_show_time_stats(show_time_stats); #else r = m_blockchain_storage.init(m_config_folder, m_testnet); #endif @@ -587,6 +664,25 @@ namespace cryptonote { return m_blockchain_storage.add_new_block(b, bvc); } + + //----------------------------------------------------------------------------------------------- + bool core::prepare_handle_incoming_blocks(const std::list &blocks) + { +#if BLOCKCHAIN_DB == DB_LMDB + m_blockchain_storage.prepare_handle_incoming_blocks(blocks); +#endif + return true; + } + + //----------------------------------------------------------------------------------------------- + bool core::cleanup_handle_incoming_blocks(bool force_sync) + { +#if BLOCKCHAIN_DB == DB_LMDB + m_blockchain_storage.cleanup_handle_incoming_blocks(force_sync); +#endif + return true; + } + //----------------------------------------------------------------------------------------------- bool core::handle_incoming_block(const blobdata& block_blob, block_verification_context& bvc, bool update_miner_blocktemplate) { @@ -678,7 +774,8 @@ namespace cryptonote return m_blockchain_storage.get_block_id_by_height(height); } //----------------------------------------------------------------------------------------------- - bool core::get_block_by_hash(const crypto::hash &h, block &blk) { + bool core::get_block_by_hash(const crypto::hash &h, block &blk) + { return m_blockchain_storage.get_block_by_hash(h, blk); } 
//----------------------------------------------------------------------------------------------- @@ -723,11 +820,13 @@ namespace cryptonote return true; } //----------------------------------------------------------------------------------------------- - void core::set_target_blockchain_height(uint64_t target_blockchain_height) { + void core::set_target_blockchain_height(uint64_t target_blockchain_height) + { m_target_blockchain_height = target_blockchain_height; } //----------------------------------------------------------------------------------------------- - uint64_t core::get_target_blockchain_height() const { + uint64_t core::get_target_blockchain_height() const + { return m_target_blockchain_height; } //----------------------------------------------------------------------------------------------- diff --git a/src/cryptonote_core/cryptonote_core.h b/src/cryptonote_core/cryptonote_core.h index ff187b4c..de953da0 100644 --- a/src/cryptonote_core/cryptonote_core.h +++ b/src/cryptonote_core/cryptonote_core.h @@ -66,6 +66,9 @@ namespace cryptonote bool on_idle(); bool handle_incoming_tx(const blobdata& tx_blob, tx_verification_context& tvc, bool keeped_by_block); bool handle_incoming_block(const blobdata& block_blob, block_verification_context& bvc, bool update_miner_blocktemplate = true); + bool prepare_handle_incoming_blocks(const std::list &blocks); + bool cleanup_handle_incoming_blocks(bool force_sync = false); + bool check_incoming_block_size(const blobdata& block_blob); i_cryptonote_protocol* get_protocol(){return m_pprotocol;} diff --git a/src/cryptonote_core/difficulty.cpp b/src/cryptonote_core/difficulty.cpp index 6486d812..2b546679 100644 --- a/src/cryptonote_core/difficulty.cpp +++ b/src/cryptonote_core/difficulty.cpp @@ -45,10 +45,7 @@ namespace cryptonote { using std::uint64_t; using std::vector; -#if defined(_MSC_VER) || defined(__MINGW32__) -#include -#include - +#if defined(__x86_64__) static inline void mul(uint64_t a, uint64_t b, uint64_t &low, uint64_t &high) { low = mul128(a, b, &high); } diff --git a/src/cryptonote_core/tx_pool.cpp b/src/cryptonote_core/tx_pool.cpp index f6dbf141..12fd3fe6 100644 --- a/src/cryptonote_core/tx_pool.cpp +++ b/src/cryptonote_core/tx_pool.cpp @@ -128,7 +128,11 @@ namespace cryptonote crypto::hash max_used_block_id = null_hash; uint64_t max_used_block_height = 0; +#if BLOCKCHAIN_DB == DB_LMDB + bool ch_inp_res = m_blockchain.check_tx_inputs(tx, max_used_block_height, max_used_block_id, kept_by_block); +#else bool ch_inp_res = m_blockchain.check_tx_inputs(tx, max_used_block_height, max_used_block_id); +#endif CRITICAL_REGION_LOCAL(m_transactions_lock); if(!ch_inp_res) { @@ -203,6 +207,9 @@ namespace cryptonote bool tx_memory_pool::remove_transaction_keyimages(const transaction& tx) { CRITICAL_REGION_LOCAL(m_transactions_lock); + // ND: Speedup + // 1. Move transaction hash calcuation outside of loop. ._. 
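+     // Compute the transaction hash once up front; the checks below reuse
+     // actual_hash instead of calling get_transaction_hash(tx) per input.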
+ crypto::hash actual_hash = get_transaction_hash(tx); BOOST_FOREACH(const txin_v& vi, tx.vin) { CHECKED_GET_SPECIFIC_VARIANT(vi, const txin_to_key, txin, false); @@ -211,11 +218,11 @@ namespace cryptonote << "transaction id = " << get_transaction_hash(tx)); std::unordered_set& key_image_set = it->second; CHECK_AND_ASSERT_MES(key_image_set.size(), false, "empty key_image set, img=" << txin.k_image << ENDL - << "transaction id = " << get_transaction_hash(tx)); + << "transaction id = " << actual_hash); - auto it_in_set = key_image_set.find(get_transaction_hash(tx)); + auto it_in_set = key_image_set.find(actual_hash); CHECK_AND_ASSERT_MES(it_in_set != key_image_set.end(), false, "transaction id not found in key_image set, img=" << txin.k_image << ENDL - << "transaction id = " << get_transaction_hash(tx)); + << "transaction id = " << actual_hash); key_image_set.erase(it_in_set); if(!key_image_set.size()) { diff --git a/src/cryptonote_protocol/cryptonote_protocol_handler.inl b/src/cryptonote_protocol/cryptonote_protocol_handler.inl index 1cf66521..a269b54b 100644 --- a/src/cryptonote_protocol/cryptonote_protocol_handler.inl +++ b/src/cryptonote_protocol/cryptonote_protocol_handler.inl @@ -315,6 +315,9 @@ namespace cryptonote if(context.m_state != cryptonote_connection_context::state_normal) return 1; + std::list blocks; + blocks.push_back(arg.b); + m_core.prepare_handle_incoming_blocks(blocks); for(auto tx_blob_it = arg.b.txs.begin(); tx_blob_it!=arg.b.txs.end();tx_blob_it++) { cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc); @@ -323,6 +326,7 @@ namespace cryptonote { LOG_PRINT_CCONTEXT_L1("Block verification failed: transaction verification failed, dropping connection"); m_p2p->drop_connection(context); + m_core.cleanup_handle_incoming_blocks(); return 1; } } @@ -331,6 +335,7 @@ namespace cryptonote block_verification_context bvc = boost::value_initialized(); m_core.pause_mine(); m_core.handle_incoming_block(arg.b.block, bvc); // got block from handle_notify_new_block + m_core.cleanup_handle_incoming_blocks(true); m_core.resume_mine(); if(bvc.m_verifivation_failed) { @@ -536,7 +541,7 @@ namespace cryptonote if (m_core.get_test_drop_download() && m_core.get_test_drop_download_height()) { // DISCARD BLOCKS for testing - + m_core.prepare_handle_incoming_blocks(arg.blocks); BOOST_FOREACH(const block_complete_entry& block_entry, arg.blocks) { // process transactions @@ -550,6 +555,7 @@ namespace cryptonote LOG_ERROR_CCONTEXT("transaction verification failed on NOTIFY_RESPONSE_GET_OBJECTS, \r\ntx_id = " << epee::string_tools::pod_to_hex(get_blob_hash(tx_blob)) << ", dropping connection"); m_p2p->drop_connection(context); + m_core.cleanup_handle_incoming_blocks(); return 1; } } @@ -566,12 +572,14 @@ namespace cryptonote { LOG_PRINT_CCONTEXT_L1("Block verification failed, dropping connection"); m_p2p->drop_connection(context); + m_core.cleanup_handle_incoming_blocks(); return 1; } if(bvc.m_marked_as_orphaned) { LOG_PRINT_CCONTEXT_L1("Block received at sync phase was marked as orphaned, dropping connection"); m_p2p->drop_connection(context); + m_core.cleanup_handle_incoming_blocks(); return 1; } @@ -582,7 +590,7 @@ namespace cryptonote epee::net_utils::data_logger::get_instance().add_data("block_processing", 1); } // each download block - + m_core.cleanup_handle_incoming_blocks(); } // if not DISCARD BLOCK diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt index f0671294..8275429d 100644 --- a/src/daemon/CMakeLists.txt +++ b/src/daemon/CMakeLists.txt @@ -26,6 +26,12 @@ # 
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +set(blocksdat "") +if(APPLE AND PER_BLOCK_CHECKPOINT) + add_custom_command(OUTPUT blocksdat.o COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ${CMAKE_C_COMPILER} -o stub.o -c stub.c COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR} && ld -r -sectcreate __DATA __blocks_dat ../blocks/blocks.dat -o ${CMAKE_CURRENT_BINARY_DIR}/blocksdat.o stub.o) + set(blocksdat "blocksdat.o") +endif() + set(daemon_sources command_parser_executor.cpp command_server.cpp @@ -69,7 +75,9 @@ bitmonero_private_headers(daemon bitmonero_add_executable(daemon ${daemon_sources} ${daemon_headers} - ${daemon_private_headers}) + ${daemon_private_headers} + ${blocksdat} +) target_link_libraries(daemon LINK_PRIVATE rpc @@ -90,8 +98,7 @@ target_link_libraries(daemon ${CMAKE_THREAD_LIBS_INIT} ${UPNP_LIBRARIES} ${EXTRA_LIBRARIES}) -add_dependencies(daemon - version) +add_dependencies(daemon version) set_property(TARGET daemon PROPERTY OUTPUT_NAME "bitmonerod") diff --git a/src/daemon/command_line_args.h b/src/daemon/command_line_args.h index 2aa212b5..ba98a6ea 100644 --- a/src/daemon/command_line_args.h +++ b/src/daemon/command_line_args.h @@ -75,7 +75,32 @@ namespace daemon_args , "Specify database type" , "lmdb" }; - + const command_line::arg_descriptor arg_prep_blocks_threads = { + "prep-blocks-threads" + , "Max number of threads to use when preparing block hashes in groups." + , 4 + }; + const command_line::arg_descriptor arg_fast_block_sync = { + "fast-block-sync" + , "Test fast block-sync option using temporarily embedded known block hashes." + , 1 + }; + const command_line::arg_descriptor arg_show_time_stats = { + "show-time-stats" + , "Show time-stats when processing blocks/txs and disk synchronization." + , 1 + }; + const command_line::arg_descriptor arg_db_auto_remove_logs = { + "db-auto-remove-logs" + , "For BerkeleyDB only. Remove transactions logs automatically." + , 1 + }; + const command_line::arg_descriptor arg_db_sync_mode = { + "db-sync-mode" + , "Specify sync option, using format [safe|fast|fastest]:[sync|async]:[nblocks_per_sync]." 
+ , "fastest:async:1000" + }; +; } // namespace daemon_args #endif // DAEMON_COMMAND_LINE_ARGS_H diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index b574200b..547f4bd9 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -86,6 +86,12 @@ int main(int argc, char const * argv[]) command_line::add_arg(core_settings, daemon_args::arg_testnet_on); command_line::add_arg(core_settings, daemon_args::arg_dns_checkpoints); command_line::add_arg(core_settings, daemon_args::arg_db_type); + command_line::add_arg(core_settings, daemon_args::arg_prep_blocks_threads); + command_line::add_arg(core_settings, daemon_args::arg_fast_block_sync); + command_line::add_arg(core_settings, daemon_args::arg_db_sync_mode); + command_line::add_arg(core_settings, daemon_args::arg_show_time_stats); + command_line::add_arg(core_settings, daemon_args::arg_db_auto_remove_logs); + daemonizer::init_options(hidden_options, visible_options); daemonize::t_executor::init_options(core_settings); diff --git a/src/p2p/net_node.h b/src/p2p/net_node.h index df726703..3eb12520 100644 --- a/src/p2p/net_node.h +++ b/src/p2p/net_node.h @@ -209,11 +209,12 @@ namespace nodetool bool set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit); bool set_rate_down_limit(const boost::program_options::variables_map& vm, int64_t limit); - bool set_rate_limit(const boost::program_options::variables_map& vm, uint64_t limit); + bool set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit); void kill() { ///< will be called e.g. from deinit() _info("Killing the net_node"); is_closing = true; + if(mPeersLoggerThread != nullptr) mPeersLoggerThread->join(); // make sure the thread finishes _info("Joined extra background net_node threads"); } diff --git a/src/p2p/net_node.inl b/src/p2p/net_node.inl index 66ee5cb5..bce3ab5a 100644 --- a/src/p2p/net_node.inl +++ b/src/p2p/net_node.inl @@ -67,6 +67,8 @@ namespace nodetool { namespace { + const int64_t default_limit_up = 2048; + const int64_t default_limit_down = 8192; const command_line::arg_descriptor arg_p2p_bind_ip = {"p2p-bind-ip", "Interface for p2p network protocol", "0.0.0.0"}; const command_line::arg_descriptor arg_p2p_bind_port = { "p2p-bind-port" @@ -93,7 +95,7 @@ namespace nodetool const command_line::arg_descriptor arg_limit_rate_up = {"limit-rate-up", "set limit-rate-up [kB/s]", -1}; const command_line::arg_descriptor arg_limit_rate_down = {"limit-rate-down", "set limit-rate-down [kB/s]", -1}; - const command_line::arg_descriptor arg_limit_rate = {"limit-rate", "set limit-rate [kB/s]", 128}; + const command_line::arg_descriptor arg_limit_rate = {"limit-rate", "set limit-rate [kB/s]", -1}; const command_line::arg_descriptor arg_save_graph = {"save-graph", "Save data for dr monero", false}; } @@ -1446,7 +1448,7 @@ namespace nodetool this->islimitup=true; if (limit==-1) { - limit=128; + limit=default_limit_up; this->islimitup=false; } @@ -1461,7 +1463,7 @@ namespace nodetool { this->islimitdown=true; if(limit==-1) { - limit=128; + limit=default_limit_down; this->islimitdown=false; } limit *= 1024; @@ -1471,19 +1473,31 @@ namespace nodetool } template - bool node_server::set_rate_limit(const boost::program_options::variables_map& vm, uint64_t limit) + bool node_server::set_rate_limit(const boost::program_options::variables_map& vm, int64_t limit) { + int64_t limit_up = 0; + int64_t limit_down = 0; + + if(limit == -1) + { + limit_up = default_limit_up * 1024; + limit_down = default_limit_down * 1024; + } + else + { + limit_up = limit * 
+      limit_down = limit * 1024;
+    }
     limit *= 1024;
     if(this->islimitdown==false && this->islimitup==false) {
-      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
-      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
-      LOG_PRINT_L0("Set limit to " << limit/1024 << " kB/s");
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit(limit_up);
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit(limit_down);
     }
     else if(this->islimitdown==false && this->islimitup==true ) {
-      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit( limit );
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_down_limit(limit_down);
     }
     else if(this->islimitdown==true && this->islimitup==false ) {
-      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit( limit );
+      epee::net_utils::connection<epee::levin::async_protocol_handler<p2p_connection_context> >::set_rate_up_limit(limit_up);
     }
     return true;
diff --git a/tests/core_proxy/core_proxy.h b/tests/core_proxy/core_proxy.h
index 09582da4..6b131e9e 100644
--- a/tests/core_proxy/core_proxy.h
+++ b/tests/core_proxy/core_proxy.h
@@ -85,5 +85,7 @@ namespace tests
     cryptonote::blockchain_storage &get_blockchain_storage() { throw std::runtime_error("Called invalid member function: please never call get_blockchain_storage on the TESTING class proxy_core."); }
     bool get_test_drop_download() {return true;}
     bool get_test_drop_download_height() {return true;}
+    bool prepare_handle_incoming_blocks(const std::list<cryptonote::block_complete_entry> &blocks) { return true; }
+    bool cleanup_handle_incoming_blocks(bool force_sync = false) { return true; }
   };
 }
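
A minimal usage sketch, assuming a daemon built from this tree with the "bitmonerod" output name set in src/daemon/CMakeLists.txt; the flags are the arg_descriptor names registered in src/daemon/main.cpp above, and the db-sync-mode value follows the [safe|fast|fastest]:[sync|async]:[nblocks_per_sync] format from command_line_args.h:

    bitmonerod --db-sync-mode=fastest:async:1000 --prep-blocks-threads=4 --show-time-stats=1

With --limit-rate left at its new -1 default, set_rate_limit falls back to the separate default_limit_up (2048 kB/s) and default_limit_down (8192 kB/s) values defined in net_node.inl instead of the old shared 128 kB/s cap.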