Diffstat (limited to 'src/test/fuzz')
-rw-r--r--  src/test/fuzz/util.cpp        | 187
-rw-r--r--  src/test/fuzz/util.h          | 172
-rw-r--r--  src/test/fuzz/versionbits.cpp |  63
3 files changed, 224 insertions, 198 deletions
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index cf5244e314..b8d846a995 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -7,15 +7,196 @@
 #include <util/rbf.h>
 #include <version.h>
 
-bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
+FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider)
+    : m_fuzzed_data_provider{fuzzed_data_provider}
 {
-    if (!m_fuzzed_data_provider.ConsumeBool()) {
+    m_socket = fuzzed_data_provider.ConsumeIntegralInRange<SOCKET>(INVALID_SOCKET - 1, INVALID_SOCKET);
+}
+
+FuzzedSock::~FuzzedSock()
+{
+    // Sock::~Sock() will be called after FuzzedSock::~FuzzedSock() and it will call
+    // Sock::Reset() (not FuzzedSock::Reset()!) which will call CloseSocket(m_socket).
+    // Avoid closing an arbitrary file descriptor (m_socket is just a random very high number which
+    // theoretically may concide with a real opened file descriptor).
+    Reset();
+}
+
+FuzzedSock& FuzzedSock::operator=(Sock&& other)
+{
+    assert(false && "Move of Sock into FuzzedSock not allowed.");
+    return *this;
+}
+
+void FuzzedSock::Reset()
+{
+    m_socket = INVALID_SOCKET;
+}
+
+ssize_t FuzzedSock::Send(const void* data, size_t len, int flags) const
+{
+    constexpr std::array send_errnos{
+        EACCES,
+        EAGAIN,
+        EALREADY,
+        EBADF,
+        ECONNRESET,
+        EDESTADDRREQ,
+        EFAULT,
+        EINTR,
+        EINVAL,
+        EISCONN,
+        EMSGSIZE,
+        ENOBUFS,
+        ENOMEM,
+        ENOTCONN,
+        ENOTSOCK,
+        EOPNOTSUPP,
+        EPIPE,
+        EWOULDBLOCK,
+    };
+    if (m_fuzzed_data_provider.ConsumeBool()) {
+        return len;
+    }
+    const ssize_t r = m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(-1, len);
+    if (r == -1) {
+        SetFuzzedErrNo(m_fuzzed_data_provider, send_errnos);
+    }
+    return r;
+}
+
+ssize_t FuzzedSock::Recv(void* buf, size_t len, int flags) const
+{
+    // Have a permanent error at recv_errnos[0] because when the fuzzed data is exhausted
+    // SetFuzzedErrNo() will always return the first element and we want to avoid Recv()
+    // returning -1 and setting errno to EAGAIN repeatedly.
+    constexpr std::array recv_errnos{
+        ECONNREFUSED,
+        EAGAIN,
+        EBADF,
+        EFAULT,
+        EINTR,
+        EINVAL,
+        ENOMEM,
+        ENOTCONN,
+        ENOTSOCK,
+        EWOULDBLOCK,
+    };
+    assert(buf != nullptr || len == 0);
+    if (len == 0 || m_fuzzed_data_provider.ConsumeBool()) {
+        const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
+        if (r == -1) {
+            SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
+        }
+        return r;
+    }
+    std::vector<uint8_t> random_bytes;
+    bool pad_to_len_bytes{m_fuzzed_data_provider.ConsumeBool()};
+    if (m_peek_data.has_value()) {
+        // `MSG_PEEK` was used in the preceding `Recv()` call, return `m_peek_data`.
+        random_bytes.assign({m_peek_data.value()});
+        if ((flags & MSG_PEEK) == 0) {
+            m_peek_data.reset();
+        }
+        pad_to_len_bytes = false;
+    } else if ((flags & MSG_PEEK) != 0) {
+        // New call with `MSG_PEEK`.
+        random_bytes = m_fuzzed_data_provider.ConsumeBytes<uint8_t>(1);
+        if (!random_bytes.empty()) {
+            m_peek_data = random_bytes[0];
+            pad_to_len_bytes = false;
+        }
+    } else {
+        random_bytes = m_fuzzed_data_provider.ConsumeBytes<uint8_t>(
+            m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, len));
+    }
+    if (random_bytes.empty()) {
+        const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
+        if (r == -1) {
+            SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
+        }
+        return r;
+    }
+    std::memcpy(buf, random_bytes.data(), random_bytes.size());
+    if (pad_to_len_bytes) {
+        if (len > random_bytes.size()) {
+            std::memset((char*)buf + random_bytes.size(), 0, len - random_bytes.size());
+        }
+        return len;
+    }
+    if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) {
+        std::this_thread::sleep_for(std::chrono::milliseconds{2});
+    }
+    return random_bytes.size();
+}
+
+int FuzzedSock::Connect(const sockaddr*, socklen_t) const
+{
+    // Have a permanent error at connect_errnos[0] because when the fuzzed data is exhausted
+    // SetFuzzedErrNo() will always return the first element and we want to avoid Connect()
+    // returning -1 and setting errno to EAGAIN repeatedly.
+    constexpr std::array connect_errnos{
+        ECONNREFUSED,
+        EAGAIN,
+        ECONNRESET,
+        EHOSTUNREACH,
+        EINPROGRESS,
+        EINTR,
+        ENETUNREACH,
+        ETIMEDOUT,
+    };
+    if (m_fuzzed_data_provider.ConsumeBool()) {
+        SetFuzzedErrNo(m_fuzzed_data_provider, connect_errnos);
+        return -1;
+    }
+    return 0;
+}
+
+int FuzzedSock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
+{
+    constexpr std::array getsockopt_errnos{
+        ENOMEM,
+        ENOBUFS,
+    };
+    if (m_fuzzed_data_provider.ConsumeBool()) {
+        SetFuzzedErrNo(m_fuzzed_data_provider, getsockopt_errnos);
+        return -1;
+    }
+    if (opt_val == nullptr) {
+        return 0;
+    }
+    std::memcpy(opt_val,
+                ConsumeFixedLengthByteVector(m_fuzzed_data_provider, *opt_len).data(),
+                *opt_len);
+    return 0;
+}
+
+bool FuzzedSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
+{
+    constexpr std::array wait_errnos{
+        EBADF,
+        EINTR,
+        EINVAL,
+    };
+    if (m_fuzzed_data_provider.ConsumeBool()) {
+        SetFuzzedErrNo(m_fuzzed_data_provider, wait_errnos);
         return false;
     }
-    if (occurred) *occurred = 0;
+    if (occurred != nullptr) {
+        *occurred = m_fuzzed_data_provider.ConsumeBool() ? requested : 0;
+    }
     return true;
 }
 
+bool FuzzedSock::IsConnected(std::string& errmsg) const
+{
+    if (m_fuzzed_data_provider.ConsumeBool()) {
+        return true;
+    }
+    errmsg = "disconnected at random by the fuzzer";
+    return false;
+}
+
 void FillNode(FuzzedDataProvider& fuzzed_data_provider, CNode& node, bool init_version) noexcept
 {
     const ServiceFlags remote_services = ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS);
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index adcdd71748..8f4f87fbdc 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -575,179 +575,25 @@ class FuzzedSock : public Sock
     mutable std::optional<uint8_t> m_peek_data;
 
 public:
-    explicit FuzzedSock(FuzzedDataProvider& fuzzed_data_provider) : m_fuzzed_data_provider{fuzzed_data_provider}
-    {
-        m_socket = fuzzed_data_provider.ConsumeIntegral<SOCKET>();
-    }
+    explicit FuzzedSock(FuzzedDataProvider& fuzzed_data_provider);
 
-    ~FuzzedSock() override
-    {
-        // Sock::~Sock() will be called after FuzzedSock::~FuzzedSock() and it will call
-        // Sock::Reset() (not FuzzedSock::Reset()!) which will call CloseSocket(m_socket).
-        // Avoid closing an arbitrary file descriptor (m_socket is just a random number which
-        // may concide with a real opened file descriptor).
-        Reset();
-    }
+    ~FuzzedSock() override;
 
-    FuzzedSock& operator=(Sock&& other) override
-    {
-        assert(false && "Move of Sock into FuzzedSock not allowed.");
-        return *this;
-    }
+    FuzzedSock& operator=(Sock&& other) override;
 
-    void Reset() override
-    {
-        m_socket = INVALID_SOCKET;
-    }
+    void Reset() override;
 
-    ssize_t Send(const void* data, size_t len, int flags) const override
-    {
-        constexpr std::array send_errnos{
-            EACCES,
-            EAGAIN,
-            EALREADY,
-            EBADF,
-            ECONNRESET,
-            EDESTADDRREQ,
-            EFAULT,
-            EINTR,
-            EINVAL,
-            EISCONN,
-            EMSGSIZE,
-            ENOBUFS,
-            ENOMEM,
-            ENOTCONN,
-            ENOTSOCK,
-            EOPNOTSUPP,
-            EPIPE,
-            EWOULDBLOCK,
-        };
-        if (m_fuzzed_data_provider.ConsumeBool()) {
-            return len;
-        }
-        const ssize_t r = m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(-1, len);
-        if (r == -1) {
-            SetFuzzedErrNo(m_fuzzed_data_provider, send_errnos);
-        }
-        return r;
-    }
+    ssize_t Send(const void* data, size_t len, int flags) const override;
 
-    ssize_t Recv(void* buf, size_t len, int flags) const override
-    {
-        // Have a permanent error at recv_errnos[0] because when the fuzzed data is exhausted
-        // SetFuzzedErrNo() will always return the first element and we want to avoid Recv()
-        // returning -1 and setting errno to EAGAIN repeatedly.
-        constexpr std::array recv_errnos{
-            ECONNREFUSED,
-            EAGAIN,
-            EBADF,
-            EFAULT,
-            EINTR,
-            EINVAL,
-            ENOMEM,
-            ENOTCONN,
-            ENOTSOCK,
-            EWOULDBLOCK,
-        };
-        assert(buf != nullptr || len == 0);
-        if (len == 0 || m_fuzzed_data_provider.ConsumeBool()) {
-            const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
-            if (r == -1) {
-                SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
-            }
-            return r;
-        }
-        std::vector<uint8_t> random_bytes;
-        bool pad_to_len_bytes{m_fuzzed_data_provider.ConsumeBool()};
-        if (m_peek_data.has_value()) {
-            // `MSG_PEEK` was used in the preceding `Recv()` call, return `m_peek_data`.
-            random_bytes.assign({m_peek_data.value()});
-            if ((flags & MSG_PEEK) == 0) {
-                m_peek_data.reset();
-            }
-            pad_to_len_bytes = false;
-        } else if ((flags & MSG_PEEK) != 0) {
-            // New call with `MSG_PEEK`.
-            random_bytes = m_fuzzed_data_provider.ConsumeBytes<uint8_t>(1);
-            if (!random_bytes.empty()) {
-                m_peek_data = random_bytes[0];
-                pad_to_len_bytes = false;
-            }
-        } else {
-            random_bytes = m_fuzzed_data_provider.ConsumeBytes<uint8_t>(
-                m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, len));
-        }
-        if (random_bytes.empty()) {
-            const ssize_t r = m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
-            if (r == -1) {
-                SetFuzzedErrNo(m_fuzzed_data_provider, recv_errnos);
-            }
-            return r;
-        }
-        std::memcpy(buf, random_bytes.data(), random_bytes.size());
-        if (pad_to_len_bytes) {
-            if (len > random_bytes.size()) {
-                std::memset((char*)buf + random_bytes.size(), 0, len - random_bytes.size());
-            }
-            return len;
-        }
-        if (m_fuzzed_data_provider.ConsumeBool() && std::getenv("FUZZED_SOCKET_FAKE_LATENCY") != nullptr) {
-            std::this_thread::sleep_for(std::chrono::milliseconds{2});
-        }
-        return random_bytes.size();
-    }
+    ssize_t Recv(void* buf, size_t len, int flags) const override;
 
-    int Connect(const sockaddr*, socklen_t) const override
-    {
-        // Have a permanent error at connect_errnos[0] because when the fuzzed data is exhausted
-        // SetFuzzedErrNo() will always return the first element and we want to avoid Connect()
-        // returning -1 and setting errno to EAGAIN repeatedly.
-        constexpr std::array connect_errnos{
-            ECONNREFUSED,
-            EAGAIN,
-            ECONNRESET,
-            EHOSTUNREACH,
-            EINPROGRESS,
-            EINTR,
-            ENETUNREACH,
-            ETIMEDOUT,
-        };
-        if (m_fuzzed_data_provider.ConsumeBool()) {
-            SetFuzzedErrNo(m_fuzzed_data_provider, connect_errnos);
-            return -1;
-        }
-        return 0;
-    }
+    int Connect(const sockaddr*, socklen_t) const override;
 
-    int GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const override
-    {
-        constexpr std::array getsockopt_errnos{
-            ENOMEM,
-            ENOBUFS,
-        };
-        if (m_fuzzed_data_provider.ConsumeBool()) {
-            SetFuzzedErrNo(m_fuzzed_data_provider, getsockopt_errnos);
-            return -1;
-        }
-        if (opt_val == nullptr) {
-            return 0;
-        }
-        std::memcpy(opt_val,
-                    ConsumeFixedLengthByteVector(m_fuzzed_data_provider, *opt_len).data(),
-                    *opt_len);
-        return 0;
-    }
+    int GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const override;
 
     bool Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred = nullptr) const override;
 
-    bool IsConnected(std::string& errmsg) const override
-    {
-        if (m_fuzzed_data_provider.ConsumeBool()) {
-            return true;
-        }
-        errmsg = "disconnected at random by the fuzzer";
-        return false;
-    }
+    bool IsConnected(std::string& errmsg) const override;
 };
 
 [[nodiscard]] inline FuzzedSock ConsumeSock(FuzzedDataProvider& fuzzed_data_provider)
diff --git a/src/test/fuzz/versionbits.cpp b/src/test/fuzz/versionbits.cpp
index 88c1a1a9cb..9186821836 100644
--- a/src/test/fuzz/versionbits.cpp
+++ b/src/test/fuzz/versionbits.cpp
@@ -29,14 +29,16 @@ public:
     const int64_t m_end;
     const int m_period;
     const int m_threshold;
+    const int m_min_activation_height;
     const int m_bit;
 
-    TestConditionChecker(int64_t begin, int64_t end, int period, int threshold, int bit)
-        : m_begin{begin}, m_end{end}, m_period{period}, m_threshold{threshold}, m_bit{bit}
+    TestConditionChecker(int64_t begin, int64_t end, int period, int threshold, int min_activation_height, int bit)
+        : m_begin{begin}, m_end{end}, m_period{period}, m_threshold{threshold}, m_min_activation_height{min_activation_height}, m_bit{bit}
     {
         assert(m_period > 0);
         assert(0 <= m_threshold && m_threshold <= m_period);
         assert(0 <= m_bit && m_bit < 32 && m_bit < VERSIONBITS_NUM_BITS);
+        assert(0 <= m_min_activation_height);
     }
 
     bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { return Condition(pindex->nVersion); }
@@ -44,6 +46,7 @@ public:
     int64_t EndTime(const Consensus::Params& params) const override { return m_end; }
     int Period(const Consensus::Params& params) const override { return m_period; }
     int Threshold(const Consensus::Params& params) const override { return m_threshold; }
+    int MinActivationHeight(const Consensus::Params& params) const override { return m_min_activation_height; }
 
     ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, dummy_params, m_cache); }
     int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, dummy_params, m_cache); }
@@ -144,32 +147,27 @@ FUZZ_TARGET_INIT(versionbits, initialize)
         // pick the timestamp to switch based on a block
         // note states will change *after* these blocks because mediantime lags
         int start_block = fuzzed_data_provider.ConsumeIntegralInRange<int>(0, period * (max_periods - 3));
-        int end_block = fuzzed_data_provider.ConsumeIntegralInRange<int>(start_block, period * (max_periods - 3));
+        int end_block = fuzzed_data_provider.ConsumeIntegralInRange<int>(0, period * (max_periods - 3));
 
         start_time = block_start_time + start_block * interval;
         timeout = block_start_time + end_block * interval;
 
-        assert(start_time <= timeout);
-
         // allow for times to not exactly match a block
         if (fuzzed_data_provider.ConsumeBool()) start_time += interval / 2;
         if (fuzzed_data_provider.ConsumeBool()) timeout += interval / 2;
-
-        // this may make timeout too early; if so, don't run the test
-        if (start_time > timeout) return;
     } else {
         if (fuzzed_data_provider.ConsumeBool()) {
            start_time = Consensus::BIP9Deployment::ALWAYS_ACTIVE;
-            timeout = Consensus::BIP9Deployment::NO_TIMEOUT;
            always_active_test = true;
        } else {
-            start_time = 1199145601; // January 1, 2008
-            timeout = 1230767999; // December 31, 2008
+            start_time = Consensus::BIP9Deployment::NEVER_ACTIVE;
            never_active_test = true;
        }
+        timeout = fuzzed_data_provider.ConsumeBool() ? Consensus::BIP9Deployment::NO_TIMEOUT : fuzzed_data_provider.ConsumeIntegral<int64_t>();
     }
+    int min_activation = fuzzed_data_provider.ConsumeIntegralInRange<int>(0, period * max_periods);
 
-    TestConditionChecker checker(start_time, timeout, period, threshold, bit);
+    TestConditionChecker checker(start_time, timeout, period, threshold, min_activation, bit);
 
     // Early exit if the versions don't signal sensibly for the deployment
     if (!checker.Condition(ver_signal)) return;
@@ -294,28 +292,35 @@ FUZZ_TARGET_INIT(versionbits, initialize)
             assert(since == 0);
             assert(exp_state == ThresholdState::DEFINED);
             assert(current_block->GetMedianTimePast() < checker.m_begin);
-            assert(current_block->GetMedianTimePast() < checker.m_end);
             break;
         case ThresholdState::STARTED:
             assert(current_block->GetMedianTimePast() >= checker.m_begin);
-            assert(current_block->GetMedianTimePast() < checker.m_end);
             if (exp_state == ThresholdState::STARTED) {
                 assert(blocks_sig < threshold);
+                assert(current_block->GetMedianTimePast() < checker.m_end);
             } else {
                 assert(exp_state == ThresholdState::DEFINED);
             }
             break;
         case ThresholdState::LOCKED_IN:
-            assert(exp_state == ThresholdState::STARTED);
-            assert(current_block->GetMedianTimePast() < checker.m_end);
-            assert(blocks_sig >= threshold);
+            if (exp_state == ThresholdState::LOCKED_IN) {
+                assert(current_block->nHeight + 1 < min_activation);
+            } else {
+                assert(exp_state == ThresholdState::STARTED);
+                assert(blocks_sig >= threshold);
+            }
             break;
         case ThresholdState::ACTIVE:
+            assert(always_active_test || min_activation <= current_block->nHeight + 1);
             assert(exp_state == ThresholdState::ACTIVE || exp_state == ThresholdState::LOCKED_IN);
             break;
         case ThresholdState::FAILED:
-            assert(current_block->GetMedianTimePast() >= checker.m_end);
-            assert(exp_state != ThresholdState::LOCKED_IN && exp_state != ThresholdState::ACTIVE);
+            assert(never_active_test || current_block->GetMedianTimePast() >= checker.m_end);
+            if (exp_state == ThresholdState::STARTED) {
+                assert(blocks_sig < threshold);
+            } else {
+                assert(exp_state == ThresholdState::FAILED);
+            }
             break;
         default:
             assert(false);
@@ -326,26 +331,20 @@ FUZZ_TARGET_INIT(versionbits, initialize)
         assert(state == ThresholdState::ACTIVE || state == ThresholdState::FAILED);
     }
 
-    // "always active" has additional restrictions
     if (always_active_test) {
+        // "always active" has additional restrictions
         assert(state == ThresholdState::ACTIVE);
         assert(exp_state == ThresholdState::ACTIVE);
         assert(since == 0);
+    } else if (never_active_test) {
+        // "never active" does too
+        assert(state == ThresholdState::FAILED);
+        assert(exp_state == ThresholdState::FAILED);
+        assert(since == 0);
     } else {
-        // except for always active, the initial state is always DEFINED
+        // for signalled deployments, the initial state is always DEFINED
        assert(since > 0 || state == ThresholdState::DEFINED);
        assert(exp_since > 0 || exp_state == ThresholdState::DEFINED);
     }
-
-    // "never active" does too
-    if (never_active_test) {
-        assert(state == ThresholdState::FAILED);
-        assert(since == period);
-        if (exp_since == 0) {
-            assert(exp_state == ThresholdState::DEFINED);
-        } else {
-            assert(exp_state == ThresholdState::FAILED);
-        }
-    }
 }
 } // namespace
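
For context on how the relocated FuzzedSock implementation is usually driven, here is a minimal harness sketch. It is illustrative only and not part of this commit: it assumes the FUZZ_TARGET macro from <test/fuzz/fuzz.h> and the ConsumeSock() helper shown in util.h above, and the target name fuzzed_sock_example is hypothetical.

// Illustrative sketch only -- not part of this diff. Assumes the FUZZ_TARGET
// macro and ConsumeSock() helper from src/test/fuzz/; the target name is made up.
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>

FUZZ_TARGET(fuzzed_sock_example)
{
    FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
    FuzzedSock sock = ConsumeSock(fuzzed_data_provider);

    // Each call consumes fuzzer-controlled bytes to pick between success,
    // short reads/writes, and errno-style failures set via SetFuzzedErrNo().
    const uint8_t payload[4] = {'p', 'i', 'n', 'g'};
    (void)sock.Send(payload, sizeof(payload), 0);

    uint8_t recv_buf[64];
    (void)sock.Recv(recv_buf, sizeof(recv_buf), MSG_PEEK); // may stash one byte in m_peek_data
    (void)sock.Recv(recv_buf, sizeof(recv_buf), 0);        // a stashed peek byte is returned first

    std::string errmsg;
    if (!sock.IsConnected(errmsg)) {
        // errmsg == "disconnected at random by the fuzzer"
    }
}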
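The new m_min_activation_height / MinActivationHeight() plumbing models the minimum-activation-height rule that the added LOCKED_IN and ACTIVE assertions exercise: a locked-in deployment only becomes ACTIVE at a period boundary whose next block height has reached the minimum activation height. The following self-contained snippet is an illustration of that rule under stated assumptions, not Bitcoin Core code; the enum and AdvanceFromLockedIn() helper are hypothetical.

// Self-contained illustration only (hypothetical names, not Bitcoin Core code).
#include <cassert>

enum class State { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

// next_height is the height of the first block of the next retarget period
// (current_block->nHeight + 1 in the fuzz target's assertions).
State AdvanceFromLockedIn(int next_height, int min_activation_height)
{
    return next_height < min_activation_height ? State::LOCKED_IN : State::ACTIVE;
}

int main()
{
    // With period = 144 and min_activation_height = 288, a deployment locked in
    // during the first period is still LOCKED_IN at height 144 and ACTIVE at 288.
    assert(AdvanceFromLockedIn(144, 288) == State::LOCKED_IN);
    assert(AdvanceFromLockedIn(288, 288) == State::ACTIVE);
    return 0;
}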