Diffstat (limited to 'src/test/fuzz/miniscript.cpp')
 src/test/fuzz/miniscript.cpp | 950 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 946 insertions(+), 4 deletions(-)
diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp
index 1d6a8d89e4..6ea8a3f185 100644
--- a/src/test/fuzz/miniscript.cpp
+++ b/src/test/fuzz/miniscript.cpp
@@ -14,14 +14,25 @@
namespace {
-//! Some pre-computed data for more efficient string roundtrips.
+//! Some pre-computed data for more efficient string roundtrips and to simulate challenges.
struct TestData {
typedef CPubKey Key;
- // Precomputed public keys.
+ // Precomputed public keys, and a dummy signature for each of them.
std::vector<Key> dummy_keys;
std::map<Key, int> dummy_key_idx_map;
std::map<CKeyID, Key> dummy_keys_map;
+ std::map<Key, std::pair<std::vector<unsigned char>, bool>> dummy_sigs;
+
+ // Precomputed hashes of each kind.
+ std::vector<std::vector<unsigned char>> sha256;
+ std::vector<std::vector<unsigned char>> ripemd160;
+ std::vector<std::vector<unsigned char>> hash256;
+ std::vector<std::vector<unsigned char>> hash160;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> sha256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> ripemd160_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash160_preimages;
//! Set the precomputed data.
void Init() {
@@ -35,6 +46,28 @@ struct TestData {
dummy_keys.push_back(pubkey);
dummy_key_idx_map.emplace(pubkey, i);
dummy_keys_map.insert({pubkey.GetID(), pubkey});
+
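+ // Also produce a dummy signature for this key, and all four hash types of the 32-byte key
+ // data. Only every other key is marked "available" for signing, and only every other hash
+ // gets its preimage stored, so both satisfiable and unsatisfiable challenges occur.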
+ std::vector<unsigned char> sig;
+ privkey.Sign(uint256S(""), sig);
+ sig.push_back(1); // SIGHASH_ALL
+ dummy_sigs.insert({pubkey, {sig, i & 1}});
+
+ std::vector<unsigned char> hash;
+ hash.resize(32);
+ CSHA256().Write(keydata, 32).Finalize(hash.data());
+ sha256.push_back(hash);
+ if (i & 1) sha256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ CHash256().Write(keydata).Finalize(hash);
+ hash256.push_back(hash);
+ if (i & 1) hash256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ hash.resize(20);
+ CRIPEMD160().Write(keydata, 32).Finalize(hash.data());
+ assert(hash.size() == 20);
+ ripemd160.push_back(hash);
+ if (i & 1) ripemd160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ CHash160().Write(keydata).Finalize(hash);
+ hash160.push_back(hash);
+ if (i & 1) hash160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
}
}
} TEST_DATA;
@@ -59,6 +92,17 @@ struct ParserContext {
return HexStr(Span{&idx, 1});
}
+ std::vector<unsigned char> ToPKBytes(const Key& key) const
+ {
+ return {key.begin(), key.end()};
+ }
+
+ std::vector<unsigned char> ToPKHBytes(const Key& key) const
+ {
+ const auto h = Hash160(key);
+ return {h.begin(), h.end()};
+ }
+
template<typename I>
std::optional<Key> FromString(I first, I last) const {
if (last - first != 2) return {};
@@ -69,7 +113,7 @@ struct ParserContext {
template<typename I>
std::optional<Key> FromPKBytes(I first, I last) const {
- Key key;
+ CPubKey key;
key.Set(first, last);
if (!key.IsValid()) return {};
return key;
@@ -104,7 +148,7 @@ struct ScriptParserContext {
return key.data;
}
- const std::vector<unsigned char> ToPKHBytes(const Key& key) const
+ std::vector<unsigned char> ToPKHBytes(const Key& key) const
{
if (key.is_hash) return key.data;
const auto h = Hash160(key.data);
@@ -130,6 +174,877 @@ struct ScriptParserContext {
}
} SCRIPT_PARSER_CONTEXT;
+//! Context to produce a satisfaction for a Miniscript node using the pre-computed data.
+struct SatisfierContext: ParserContext {
+ // Timelock challenge satisfaction. Make the result (deterministically) depend on the value to
+ // explore different code paths.
+ bool CheckAfter(uint32_t value) const { return value % 2; }
+ bool CheckOlder(uint32_t value) const { return value % 2; }
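+ // (This parity rule matches CheckLockTime/CheckSequence in CheckerContext below, so the
+ // satisfactions produced here also pass verification.)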
+
+ // Signature challenges fulfilled with a dummy signature, if it was one of our dummy keys.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ const auto it = TEST_DATA.dummy_sigs.find(key);
+ if (it == TEST_DATA.dummy_sigs.end()) return miniscript::Availability::NO;
+ if (it->second.second) {
+ // Key is "available"
+ sig = it->second.first;
+ return miniscript::Availability::YES;
+ } else {
+ return miniscript::Availability::NO;
+ }
+ }
+
+ //! Lookup generalization for all the hash satisfactions below
+ miniscript::Availability LookupHash(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage,
+ const std::map<std::vector<unsigned char>, std::vector<unsigned char>>& map) const
+ {
+ const auto it = map.find(hash);
+ if (it == map.end()) return miniscript::Availability::NO;
+ preimage = it->second;
+ return miniscript::Availability::YES;
+ }
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.sha256_preimages);
+ }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.ripemd160_preimages);
+ }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.hash256_preimages);
+ }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.hash160_preimages);
+ }
+} SATISFIER_CTX;
+
+//! Context to check a satisfaction against the pre-computed data.
+struct CheckerContext: BaseSignatureChecker {
+ TestData *test_data;
+
+ // Signature checker methods. Checks the right dummy signature is used.
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& vchPubKey,
+ const CScript& scriptCode, SigVersion sigversion) const override
+ {
+ const CPubKey key{vchPubKey};
+ const auto it = TEST_DATA.dummy_sigs.find(key);
+ if (it == TEST_DATA.dummy_sigs.end()) return false;
+ return it->second.first == sig;
+ }
+ bool CheckLockTime(const CScriptNum& nLockTime) const override { return nLockTime.GetInt64() & 1; }
+ bool CheckSequence(const CScriptNum& nSequence) const override { return nSequence.GetInt64() & 1; }
+} CHECKER_CTX;
+
+//! Context to check for duplicate keys when instantiating a Node.
+struct KeyComparator {
+ bool KeyCompare(const CPubKey& a, const CPubKey& b) const {
+ return a < b;
+ }
+} KEY_COMP;
+
+// A dummy scriptsig to pass to VerifyScript (we always use Segwit v0).
+const CScript DUMMY_SCRIPTSIG;
+
+using Fragment = miniscript::Fragment;
+using NodeRef = miniscript::NodeRef<CPubKey>;
+using Node = miniscript::Node<CPubKey>;
+using Type = miniscript::Type;
+// https://github.com/llvm/llvm-project/issues/53444
+// NOLINTNEXTLINE(misc-unused-using-decls)
+using miniscript::operator"" _mst;
+
+//! Construct a miniscript node as a shared_ptr.
+template<typename... Args> NodeRef MakeNodeRef(Args&&... args) {
+ return miniscript::MakeNodeRef<CPubKey>(miniscript::internal::NoDupCheck{}, std::forward<Args>(args)...);
+}
+
+/** Information about a yet to be constructed Miniscript node. */
+struct NodeInfo {
+ //! The type of this node
+ Fragment fragment;
+ //! The timelock value for older() and after(), the threshold value for multi() and thresh()
+ uint32_t k;
+ //! Keys for this node, if it has some
+ std::vector<CPubKey> keys;
+ //! The hash value for this node, if it has one
+ std::vector<unsigned char> hash;
+ //! The type requirements for the children of this node.
+ std::vector<Type> subtypes;
+
+ NodeInfo(Fragment frag): fragment(frag), k(0) {}
+ NodeInfo(Fragment frag, CPubKey key): fragment(frag), k(0), keys({key}) {}
+ NodeInfo(Fragment frag, uint32_t _k): fragment(frag), k(_k) {}
+ NodeInfo(Fragment frag, std::vector<unsigned char> h): fragment(frag), k(0), hash(std::move(h)) {}
+ NodeInfo(std::vector<Type> subt, Fragment frag): fragment(frag), k(0), subtypes(std::move(subt)) {}
+ NodeInfo(std::vector<Type> subt, Fragment frag, uint32_t _k): fragment(frag), k(_k), subtypes(std::move(subt)) {}
+ NodeInfo(Fragment frag, uint32_t _k, std::vector<CPubKey> _keys): fragment(frag), k(_k), keys(std::move(_keys)) {}
+};
+
+/** Pick an index in a collection from a single byte in the fuzzer's output. */
+template<typename T, typename A>
+T ConsumeIndex(FuzzedDataProvider& provider, A& col) {
+ const uint8_t i = provider.ConsumeIntegral<uint8_t>();
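+ // No bounds check: this relies on the pre-computed collections holding (at least) 256 entries.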
+ return col[i];
+}
+
+CPubKey ConsumePubKey(FuzzedDataProvider& provider) {
+ return ConsumeIndex<CPubKey>(provider, TEST_DATA.dummy_keys);
+}
+
+std::vector<unsigned char> ConsumeSha256(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.sha256);
+}
+
+std::vector<unsigned char> ConsumeHash256(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.hash256);
+}
+
+std::vector<unsigned char> ConsumeRipemd160(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.ripemd160);
+}
+
+std::vector<unsigned char> ConsumeHash160(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.hash160);
+}
+
+std::optional<uint32_t> ConsumeTimeLock(FuzzedDataProvider& provider) {
+ const uint32_t k = provider.ConsumeIntegral<uint32_t>();
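+ // older() and after() only accept values in [1, 0x80000000).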
+ if (k == 0 || k >= 0x80000000) return {};
+ return k;
+}
+
+/**
+ * Consume a Miniscript node from the fuzzer's output.
+ *
+ * This version is intended to have a fixed, stable encoding for Miniscript nodes:
+ * - The first byte sets the type of the fragment. 0, 1 and all non-leaf fragments except thresh() are
+ * encoded in a single byte.
+ * - For the other leaf fragments, the following bytes depend on their type.
+ * - For older() and after(), the next 4 bytes define the timelock value.
+ * - For pk_k(), pk_h(), and all hashes, the next byte defines the index of the value in the test data.
+ * - For multi(), the next 2 bytes define respectively the threshold and the number of keys. Then as many
+ * bytes as the number of keys define the index of each key in the test data.
+ * - For thresh(), the next byte defines the threshold value and the following one the number of subs.
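+ *
+ * For example, the byte sequence {10, 2, 3, 0, 1, 2} decodes to a multi() with threshold 2
+ * over the dummy keys at indices 0, 1 and 2.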
+ */
+std::optional<NodeInfo> ConsumeNodeStable(FuzzedDataProvider& provider, Type type_needed) {
+ bool allow_B = (type_needed == ""_mst) || (type_needed << "B"_mst);
+ bool allow_K = (type_needed == ""_mst) || (type_needed << "K"_mst);
+ bool allow_V = (type_needed == ""_mst) || (type_needed << "V"_mst);
+ bool allow_W = (type_needed == ""_mst) || (type_needed << "W"_mst);
+
+ switch (provider.ConsumeIntegral<uint8_t>()) {
+ case 0:
+ if (!allow_B) return {};
+ return {{Fragment::JUST_0}};
+ case 1:
+ if (!allow_B) return {};
+ return {{Fragment::JUST_1}};
+ case 2:
+ if (!allow_K) return {};
+ return {{Fragment::PK_K, ConsumePubKey(provider)}};
+ case 3:
+ if (!allow_K) return {};
+ return {{Fragment::PK_H, ConsumePubKey(provider)}};
+ case 4: {
+ if (!allow_B) return {};
+ const auto k = ConsumeTimeLock(provider);
+ if (!k) return {};
+ return {{Fragment::OLDER, *k}};
+ }
+ case 5: {
+ if (!allow_B) return {};
+ const auto k = ConsumeTimeLock(provider);
+ if (!k) return {};
+ return {{Fragment::AFTER, *k}};
+ }
+ case 6:
+ if (!allow_B) return {};
+ return {{Fragment::SHA256, ConsumeSha256(provider)}};
+ case 7:
+ if (!allow_B) return {};
+ return {{Fragment::HASH256, ConsumeHash256(provider)}};
+ case 8:
+ if (!allow_B) return {};
+ return {{Fragment::RIPEMD160, ConsumeRipemd160(provider)}};
+ case 9:
+ if (!allow_B) return {};
+ return {{Fragment::HASH160, ConsumeHash160(provider)}};
+ case 10: {
+ if (!allow_B) return {};
+ const auto k = provider.ConsumeIntegral<uint8_t>();
+ const auto n_keys = provider.ConsumeIntegral<uint8_t>();
+ if (n_keys > 20 || k == 0 || k > n_keys) return {};
+ std::vector<CPubKey> keys{n_keys};
+ for (auto& key: keys) key = ConsumePubKey(provider);
+ return {{Fragment::MULTI, k, std::move(keys)}};
+ }
+ case 11:
+ if (!(allow_B || allow_K || allow_V)) return {};
+ return {{{"B"_mst, type_needed, type_needed}, Fragment::ANDOR}};
+ case 12:
+ if (!(allow_B || allow_K || allow_V)) return {};
+ return {{{"V"_mst, type_needed}, Fragment::AND_V}};
+ case 13:
+ if (!allow_B) return {};
+ return {{{"B"_mst, "W"_mst}, Fragment::AND_B}};
+ case 15:
+ if (!allow_B) return {};
+ return {{{"B"_mst, "W"_mst}, Fragment::OR_B}};
+ case 16:
+ if (!allow_V) return {};
+ return {{{"B"_mst, "V"_mst}, Fragment::OR_C}};
+ case 17:
+ if (!allow_B) return {};
+ return {{{"B"_mst, "B"_mst}, Fragment::OR_D}};
+ case 18:
+ if (!(allow_B || allow_K || allow_V)) return {};
+ return {{{type_needed, type_needed}, Fragment::OR_I}};
+ case 19: {
+ if (!allow_B) return {};
+ auto k = provider.ConsumeIntegral<uint8_t>();
+ auto n_subs = provider.ConsumeIntegral<uint8_t>();
+ if (k == 0 || k > n_subs) return {};
+ std::vector<Type> subtypes;
+ subtypes.reserve(n_subs);
+ subtypes.emplace_back("B"_mst);
+ for (size_t i = 1; i < n_subs; ++i) subtypes.emplace_back("W"_mst);
+ return {{std::move(subtypes), Fragment::THRESH, k}};
+ }
+ case 20:
+ if (!allow_W) return {};
+ return {{{"B"_mst}, Fragment::WRAP_A}};
+ case 21:
+ if (!allow_W) return {};
+ return {{{"B"_mst}, Fragment::WRAP_S}};
+ case 22:
+ if (!allow_B) return {};
+ return {{{"K"_mst}, Fragment::WRAP_C}};
+ case 23:
+ if (!allow_B) return {};
+ return {{{"V"_mst}, Fragment::WRAP_D}};
+ case 24:
+ if (!allow_V) return {};
+ return {{{"B"_mst}, Fragment::WRAP_V}};
+ case 25:
+ if (!allow_B) return {};
+ return {{{"B"_mst}, Fragment::WRAP_J}};
+ case 26:
+ if (!allow_B) return {};
+ return {{{"B"_mst}, Fragment::WRAP_N}};
+ default:
+ break;
+ }
+ return {};
+}
+
+/* This structure contains a table which, for each "target" Type, gives a list of recipes
+ * to construct it, automatically inferred from the behavior of ComputeType.
+ * Note that the Types here are not the final types of the constructed Nodes, but
+ * just the subset that are required. For example, a recipe for the "Bo" type
+ * might construct a "Bondu" sha256() NodeInfo, but cannot construct a "Bz" older().
+ * Each recipe is a Fragment together with a list of required types for its subnodes.
+ */
+struct SmartInfo
+{
+ using recipe = std::pair<Fragment, std::vector<Type>>;
+ std::map<Type, std::vector<recipe>> table;
+
+ void Init()
+ {
+ /* Construct a set of interesting type requirements to reason with (sections of BKVWzondu). */
+ std::vector<Type> types;
+ for (int base = 0; base < 4; ++base) { /* select from B,K,V,W */
+ Type type_base = base == 0 ? "B"_mst : base == 1 ? "K"_mst : base == 2 ? "V"_mst : "W"_mst;
+ for (int zo = 0; zo < 3; ++zo) { /* select from z,o,(none) */
+ Type type_zo = zo == 0 ? "z"_mst : zo == 1 ? "o"_mst : ""_mst;
+ for (int n = 0; n < 2; ++n) { /* select from (none),n */
+ if (zo == 0 && n == 1) continue; /* z conflicts with n */
+ if (base == 3 && n == 1) continue; /* W conflicts with n */
+ Type type_n = n == 0 ? ""_mst : "n"_mst;
+ for (int d = 0; d < 2; ++d) { /* select from (none),d */
+ if (base == 2 && d == 1) continue; /* V conflicts with d */
+ Type type_d = d == 0 ? ""_mst : "d"_mst;
+ for (int u = 0; u < 2; ++u) { /* select from (none),u */
+ if (base == 2 && u == 1) continue; /* V conflicts with u */
+ Type type_u = u == 0 ? ""_mst : "u"_mst;
+ Type type = type_base | type_zo | type_n | type_d | type_u;
+ types.push_back(type);
+ }
+ }
+ }
+ }
+ }
+
+ /* We define a recipe a to be a super-recipe of recipe b if they use the same
+ * fragment, the same number of subexpressions, and each of a's subexpression
+ * types is a supertype of the corresponding subexpression type of b.
+ * Within the set of recipes for the construction of a given type requirement,
+ * no recipe should be a super-recipe of another (as the super-recipe is
+ * applicable in every place the sub-recipe is, the sub-recipe is redundant). */
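+ /* For example, {AND_B, {"B","W"}} is a super-recipe of {AND_B, {"Bd","W"}}: wherever the
+ * latter could be applied, the former can be applied too, so the latter adds nothing. */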
+ auto is_super_of = [](const recipe& a, const recipe& b) {
+ if (a.first != b.first) return false;
+ if (a.second.size() != b.second.size()) return false;
+ for (size_t i = 0; i < a.second.size(); ++i) {
+ if (!(b.second[i] << a.second[i])) return false;
+ }
+ return true;
+ };
+
+ /* Sort the type requirements. Subtypes will always sort later (e.g. Bondu will
+ * sort after Bo or Bu). As we'll be constructing recipes using these types, in
+ * order, in what follows, we'll construct super-recipes before sub-recipes.
+ * That means we never need to go back and delete a sub-recipe because a
+ * super-recipe got added. */
+ std::sort(types.begin(), types.end());
+
+ // Iterate over all possible fragments.
+ for (int fragidx = 0; fragidx <= int(Fragment::MULTI); ++fragidx) {
+ int sub_count = 0; //!< The minimum number of child nodes this recipe has.
+ int sub_range = 1; //!< The maximum number of child nodes for this recipe is sub_count+sub_range-1.
+ size_t data_size = 0;
+ size_t n_keys = 0;
+ uint32_t k = 0;
+ Fragment frag{fragidx};
+
+ // Based on the fragment, determine #subs/data/k/keys to pass to ComputeType.
+ switch (frag) {
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ n_keys = 1;
+ break;
+ case Fragment::MULTI:
+ n_keys = 1;
+ k = 1;
+ break;
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ k = 1;
+ break;
+ case Fragment::SHA256:
+ case Fragment::HASH256:
+ data_size = 32;
+ break;
+ case Fragment::RIPEMD160:
+ case Fragment::HASH160:
+ data_size = 20;
+ break;
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ break;
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_D:
+ case Fragment::WRAP_V:
+ case Fragment::WRAP_J:
+ case Fragment::WRAP_N:
+ sub_count = 1;
+ break;
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ sub_count = 2;
+ break;
+ case Fragment::ANDOR:
+ sub_count = 3;
+ break;
+ case Fragment::THRESH:
+ // Thresh logic is executed for 1 and 2 arguments. Larger numbers use ad-hoc code to extend.
+ sub_count = 1;
+ sub_range = 2;
+ k = 1;
+ break;
+ }
+
+ // Iterate over the number of subnodes (sub_count...sub_count+sub_range-1).
+ std::vector<Type> subt;
+ for (int subs = sub_count; subs < sub_count + sub_range; ++subs) {
+ // Iterate over the possible subnode types (at most 3).
+ for (Type x : types) {
+ for (Type y : types) {
+ for (Type z : types) {
+ // Compute the resulting type of a node with the selected fragment / subnode types.
+ subt.clear();
+ if (subs > 0) subt.push_back(x);
+ if (subs > 1) subt.push_back(y);
+ if (subs > 2) subt.push_back(z);
+ Type res = miniscript::internal::ComputeType(frag, x, y, z, subt, k, data_size, subs, n_keys);
+ // Continue if the result is not a valid node.
+ if ((res << "K"_mst) + (res << "V"_mst) + (res << "B"_mst) + (res << "W"_mst) != 1) continue;
+
+ recipe entry{frag, subt};
+ auto super_of_entry = [&](const recipe& rec) { return is_super_of(rec, entry); };
+ // Iterate over all supertypes of res (because if e.g. our selected fragment/subnodes result
+ // in a Bondu, they can form a recipe that is also applicable for constructing a B, Bou, Bdu, ...).
+ for (Type s : types) {
+ if ((res & "BKVWzondu"_mst) << s) {
+ auto& recipes = table[s];
+ // If we don't already have a super-recipe to the new one, add it.
+ if (!std::any_of(recipes.begin(), recipes.end(), super_of_entry)) {
+ recipes.push_back(entry);
+ }
+ }
+ }
+
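+ // Avoid redundant re-iteration over subnode type variables this recipe does not use.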
+ if (subs <= 2) break;
+ }
+ if (subs <= 1) break;
+ }
+ if (subs <= 0) break;
+ }
+ }
+ }
+
+ /* Find which types are useful. The fuzzer logic only cares about constructing
+ * B,V,K,W nodes, so any type that isn't needed in any recipe (directly or
+ * indirectly) for the construction of those is uninteresting. */
+ std::set<Type> useful_types{"B"_mst, "V"_mst, "K"_mst, "W"_mst};
+ // Find the transitive closure by adding types until the set of types does not change.
+ while (true) {
+ size_t set_size = useful_types.size();
+ for (const auto& [type, recipes] : table) {
+ if (useful_types.count(type) != 0) {
+ for (const auto& [_, subtypes] : recipes) {
+ for (auto subtype : subtypes) useful_types.insert(subtype);
+ }
+ }
+ }
+ if (useful_types.size() == set_size) break;
+ }
+ // Remove all rules that construct uninteresting types.
+ for (auto type_it = table.begin(); type_it != table.end();) {
+ if (useful_types.count(type_it->first) == 0) {
+ type_it = table.erase(type_it);
+ } else {
+ ++type_it;
+ }
+ }
+
+ /* Find which types are constructible. A type is constructible if there is a leaf
+ * node recipe for constructing it, or a recipe whose subnodes are all constructible.
+ * Types can be non-constructible because they have no recipes to begin with,
+ * because they can only be constructed using recipes that involve otherwise
+ * non-constructible types, or because they require infinite recursion. */
+ std::set<Type> constructible_types{};
+ auto known_constructible = [&](Type type) { return constructible_types.count(type) != 0; };
+ // Find the transitive closure by adding types until the set of types does not change.
+ while (true) {
+ size_t set_size = constructible_types.size();
+ // Iterate over all types we have recipes for.
+ for (const auto& [type, recipes] : table) {
+ if (!known_constructible(type)) {
+ // For not (yet known to be) constructible types, iterate over their recipes.
+ for (const auto& [_, subt] : recipes) {
+ // If any recipe involves only (already known to be) constructible types,
+ // add the recipe's type to the set.
+ if (std::all_of(subt.begin(), subt.end(), known_constructible)) {
+ constructible_types.insert(type);
+ break;
+ }
+ }
+ }
+ }
+ if (constructible_types.size() == set_size) break;
+ }
+ for (auto type_it = table.begin(); type_it != table.end();) {
+ // Remove all recipes which involve non-constructible types.
+ type_it->second.erase(std::remove_if(type_it->second.begin(), type_it->second.end(),
+ [&](const recipe& rec) {
+ return !std::all_of(rec.second.begin(), rec.second.end(), known_constructible);
+ }), type_it->second.end());
+ // Delete types entirely which have no recipes left.
+ if (type_it->second.empty()) {
+ type_it = table.erase(type_it);
+ } else {
+ ++type_it;
+ }
+ }
+
+ for (auto& [type, recipes] : table) {
+ // Sort recipes for determinism, and place those using fewer subnodes first.
+ // This avoids runaway expansion (when reaching the end of the fuzz input,
+ // all zeroes are read, resulting in the first available recipe being picked).
+ std::sort(recipes.begin(), recipes.end(),
+ [](const recipe& a, const recipe& b) {
+ if (a.second.size() < b.second.size()) return true;
+ if (a.second.size() > b.second.size()) return false;
+ return a < b;
+ }
+ );
+ }
+ }
+} SMARTINFO;
+
+/**
+ * Consume a Miniscript node from the fuzzer's output.
+ *
+ * This is similar to ConsumeNodeStable, but uses a precomputed table with permitted
+ * fragments/subnode type for each required type. It is intended to more quickly explore
+ * interesting miniscripts, at the cost of higher implementation complexity (which could
+ * cause it to miss things if incorrect), and with less regard for stability of the seeds
+ * (as improvements to the tables or changes to the typing rules could invalidate
+ * everything).
+ */
+std::optional<NodeInfo> ConsumeNodeSmart(FuzzedDataProvider& provider, Type type_needed) {
+ /** Table entry for the requested type. */
+ auto recipes_it = SMARTINFO.table.find(type_needed);
+ assert(recipes_it != SMARTINFO.table.end());
+ /** Pick one recipe from the available ones for that type. */
+ const auto& [frag, subt] = PickValue(provider, recipes_it->second);
+
+ // Based on the fragment the recipe uses, fill in other data (k, keys, data).
+ switch (frag) {
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ return {{frag, ConsumePubKey(provider)}};
+ case Fragment::MULTI: {
+ const auto n_keys = provider.ConsumeIntegralInRange<uint8_t>(1, 20);
+ const auto k = provider.ConsumeIntegralInRange<uint8_t>(1, n_keys);
+ std::vector<CPubKey> keys{n_keys};
+ for (auto& key: keys) key = ConsumePubKey(provider);
+ return {{frag, k, std::move(keys)}};
+ }
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ return {{frag, provider.ConsumeIntegralInRange<uint32_t>(1, 0x7FFFFFF)}};
+ case Fragment::SHA256:
+ return {{frag, PickValue(provider, TEST_DATA.sha256)}};
+ case Fragment::HASH256:
+ return {{frag, PickValue(provider, TEST_DATA.hash256)}};
+ case Fragment::RIPEMD160:
+ return {{frag, PickValue(provider, TEST_DATA.ripemd160)}};
+ case Fragment::HASH160:
+ return {{frag, PickValue(provider, TEST_DATA.hash160)}};
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_D:
+ case Fragment::WRAP_V:
+ case Fragment::WRAP_J:
+ case Fragment::WRAP_N:
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ case Fragment::ANDOR:
+ return {{subt, frag}};
+ case Fragment::THRESH: {
+ uint32_t children;
+ if (subt.size() < 2) {
+ children = subt.size();
+ } else {
+ // If we hit a thresh with 2 subnodes, artificially extend it to any number
+ // (2 or larger) by replicating the type of the last subnode.
+ children = provider.ConsumeIntegralInRange<uint32_t>(2, MAX_OPS_PER_SCRIPT / 2);
+ }
+ auto k = provider.ConsumeIntegralInRange<uint32_t>(1, children);
+ std::vector<Type> subs = subt;
+ while (subs.size() < children) subs.push_back(subs.back());
+ return {{std::move(subs), frag, k}};
+ }
+ }
+
+ assert(false);
+}
+
+/**
+ * Generate a Miniscript node based on the fuzzer's input.
+ *
+ * - ConsumeNode is a function object taking a Type, and returning an std::optional<NodeInfo>.
+ * - root_type is the required type properties of the constructed NodeRef.
+ * - strict_valid sets whether ConsumeNode is expected to guarantee a NodeInfo that results in
+ * a NodeRef whose Type() matches the type fed to ConsumeNode.
+ */
+template<typename F>
+NodeRef GenNode(F ConsumeNode, Type root_type, bool strict_valid = false) {
+ /** A stack of miniscript Nodes being built up. */
+ std::vector<NodeRef> stack;
+ /** The stack of instructions, processed back to front. */
+ std::vector<std::pair<Type, std::optional<NodeInfo>>> todo{{root_type, {}}};
+ /** Predict the number of (static) script ops. */
+ uint32_t ops{0};
+ /** Predict the total script size (every unexplored subnode is counted as one, as every leaf is
+ * at least one script byte). */
+ uint32_t scriptsize{1};
+
+ while (!todo.empty()) {
+ // The expected type we have to construct.
+ auto type_needed = todo.back().first;
+ if (!todo.back().second) {
+ // Fragment/children have not been decided yet. Decide them.
+ auto node_info = ConsumeNode(type_needed);
+ if (!node_info) return {};
+ // Update predicted resource limits. Since every leaf Miniscript node is at least one
+ // byte long, we move one byte from each child to their parent. A similar technique is
+ // used in the miniscript::internal::Parse function to prevent runaway string parsing.
+ scriptsize += miniscript::internal::ComputeScriptLen(node_info->fragment, ""_mst, node_info->subtypes.size(), node_info->k, node_info->subtypes.size(), node_info->keys.size()) - 1;
+ if (scriptsize > MAX_STANDARD_P2WSH_SCRIPT_SIZE) return {};
+ switch (node_info->fragment) {
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ break;
+ case Fragment::PK_K:
+ break;
+ case Fragment::PK_H:
+ ops += 3;
+ break;
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ ops += 1;
+ break;
+ case Fragment::RIPEMD160:
+ case Fragment::SHA256:
+ case Fragment::HASH160:
+ case Fragment::HASH256:
+ ops += 4;
+ break;
+ case Fragment::ANDOR:
+ ops += 3;
+ break;
+ case Fragment::AND_V:
+ break;
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ ops += 1;
+ break;
+ case Fragment::OR_C:
+ ops += 2;
+ break;
+ case Fragment::OR_D:
+ ops += 3;
+ break;
+ case Fragment::OR_I:
+ ops += 3;
+ break;
+ case Fragment::THRESH:
+ ops += node_info->subtypes.size();
+ break;
+ case Fragment::MULTI:
+ ops += 1;
+ break;
+ case Fragment::WRAP_A:
+ ops += 2;
+ break;
+ case Fragment::WRAP_S:
+ ops += 1;
+ break;
+ case Fragment::WRAP_C:
+ ops += 1;
+ break;
+ case Fragment::WRAP_D:
+ ops += 3;
+ break;
+ case Fragment::WRAP_V:
+ // We don't account for OP_VERIFY here; that will be corrected for when the actual
+ // node is constructed below.
+ break;
+ case Fragment::WRAP_J:
+ ops += 4;
+ break;
+ case Fragment::WRAP_N:
+ ops += 1;
+ break;
+ }
+ if (ops > MAX_OPS_PER_SCRIPT) return {};
+ auto subtypes = node_info->subtypes;
+ todo.back().second = std::move(node_info);
+ todo.reserve(todo.size() + subtypes.size());
+ // As elements on the todo stack are processed back to front, construct
+ // them in reverse order (so that the first subnode is generated first).
+ for (size_t i = 0; i < subtypes.size(); ++i) {
+ todo.emplace_back(*(subtypes.rbegin() + i), std::nullopt);
+ }
+ } else {
+ // The back of todo has fragment and number of children decided, and
+ // those children have been constructed at the back of stack. Pop
+ // that entry off todo, and use it to construct a new NodeRef on
+ // stack.
+ NodeInfo& info = *todo.back().second;
+ // Gather children from the back of stack.
+ std::vector<NodeRef> sub;
+ sub.reserve(info.subtypes.size());
+ for (size_t i = 0; i < info.subtypes.size(); ++i) {
+ sub.push_back(std::move(*(stack.end() - info.subtypes.size() + i)));
+ }
+ stack.erase(stack.end() - info.subtypes.size(), stack.end());
+ // Construct new NodeRef.
+ NodeRef node;
+ if (info.keys.empty()) {
+ node = MakeNodeRef(info.fragment, std::move(sub), std::move(info.hash), info.k);
+ } else {
+ assert(sub.empty());
+ assert(info.hash.empty());
+ node = MakeNodeRef(info.fragment, std::move(info.keys), info.k);
+ }
+ // Verify acceptability.
+ if (!node || (node->GetType() & "KVWB"_mst) == ""_mst) {
+ assert(!strict_valid);
+ return {};
+ }
+ if (!(type_needed == ""_mst)) {
+ assert(node->GetType() << type_needed);
+ }
+ if (!node->IsValid()) return {};
+ // Update resource predictions.
+ if (node->fragment == Fragment::WRAP_V && node->subs[0]->GetType() << "x"_mst) {
+ ops += 1;
+ scriptsize += 1;
+ }
+ if (ops > MAX_OPS_PER_SCRIPT) return {};
+ if (scriptsize > MAX_STANDARD_P2WSH_SCRIPT_SIZE) return {};
+ // Move it to the stack.
+ stack.push_back(std::move(node));
+ todo.pop_back();
+ }
+ }
+ assert(stack.size() == 1);
+ assert(stack[0]->GetStaticOps() == ops);
+ assert(stack[0]->ScriptSize() == scriptsize);
+ stack[0]->DuplicateKeyCheck(KEY_COMP);
+ return std::move(stack[0]);
+}
+
+/** Perform various applicable tests on a miniscript Node. */
+void TestNode(const NodeRef& node, FuzzedDataProvider& provider)
+{
+ if (!node) return;
+
+ // Check that it roundtrips to text representation
+ std::optional<std::string> str{node->ToString(PARSER_CTX)};
+ assert(str);
+ auto parsed = miniscript::FromString(*str, PARSER_CTX);
+ assert(parsed);
+ assert(*parsed == *node);
+
+ // Check consistency between script size estimation and real size.
+ auto script = node->ToScript(PARSER_CTX);
+ assert(node->ScriptSize() == script.size());
+
+ // Check consistency of "x" property with the script (type K is excluded, because it can end
+ // with a push of a key, which could match these opcodes).
+ if (!(node->GetType() << "K"_mst)) {
+ bool ends_in_verify = !(node->GetType() << "x"_mst);
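+ // ("x" means the script does not end in an opcode that has a -VERIFY variant, so a v:
+ // wrapper around it would need an explicit OP_VERIFY.)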
+ assert(ends_in_verify == (script.back() == OP_CHECKSIG || script.back() == OP_CHECKMULTISIG || script.back() == OP_EQUAL));
+ }
+
+ // The rest of the checks only apply when testing a valid top-level script.
+ if (!node->IsValidTopLevel()) return;
+
+ // Check roundtrip to script
+ auto decoded = miniscript::FromScript(script, PARSER_CTX);
+ assert(decoded);
+ // Note we can't use *decoded == *node because the miniscript representation may differ, so we check that:
+ // - The script corresponding to that decoded form matches exactly
+ // - The type matches exactly
+ assert(decoded->ToScript(PARSER_CTX) == script);
+ assert(decoded->GetType() == node->GetType());
+
+ if (provider.ConsumeBool() && node->GetOps() < MAX_OPS_PER_SCRIPT && node->ScriptSize() < MAX_STANDARD_P2WSH_SCRIPT_SIZE) {
+ // Optionally pad the script with OP_NOPs to max out the ops limit of the constructed script.
+ // This makes the script obviously not actually miniscript-compatible anymore, but the
+ // signatures constructed in this test don't commit to the script anyway, so the same
+ // miniscript satisfier will work. This increases the sensitivity of the test to the ops
+ // counting logic being too low, especially for simple scripts.
+ // Do this optionally because we're not solely interested in cases where the number of ops is
+ // maximal.
+ // Do not pad more than what would cause MAX_STANDARD_P2WSH_SCRIPT_SIZE to be reached, however,
+ // as that also invalidates scripts.
+ int add = std::min<int>(
+ MAX_OPS_PER_SCRIPT - node->GetOps(),
+ MAX_STANDARD_P2WSH_SCRIPT_SIZE - node->ScriptSize());
+ for (int i = 0; i < add; ++i) script.push_back(OP_NOP);
+ }
+
+ // Run malleable satisfaction algorithm.
+ const CScript script_pubkey = CScript() << OP_0 << WitnessV0ScriptHash(script);
+ CScriptWitness witness_mal;
+ const bool mal_success = node->Satisfy(SATISFIER_CTX, witness_mal.stack, false) == miniscript::Availability::YES;
+ witness_mal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ // Run non-malleable satisfaction algorithm.
+ CScriptWitness witness_nonmal;
+ const bool nonmal_success = node->Satisfy(SATISFIER_CTX, witness_nonmal.stack, true) == miniscript::Availability::YES;
+ witness_nonmal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ if (nonmal_success) {
+ // Non-malleable satisfactions are bounded by GetStackSize().
+ assert(witness_nonmal.stack.size() <= node->GetStackSize());
+ // If a non-malleable satisfaction exists, the malleable one must also exist, and be identical to it.
+ assert(mal_success);
+ assert(witness_nonmal.stack == witness_mal.stack);
+
+ // Test non-malleable satisfaction.
+ ScriptError serror;
+ bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror);
+ // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions().
+ if (node->ValidSatisfactions()) assert(res);
+ // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed),
+ // or with a stack size error (if CheckStackSize check failed).
+ assert(res ||
+ (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) ||
+ (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE));
+ }
+
+ if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) {
+ // Test malleable satisfaction only if it's different from the non-malleable one.
+ ScriptError serror;
+ bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror);
+ // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only
+ // fail due to stack or ops limits.
+ assert(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE);
+ }
+
+ if (node->IsSane()) {
+ // For sane nodes, the two algorithms behave identically.
+ assert(mal_success == nonmal_success);
+ }
+
+ // Verify that if a node is policy-satisfiable, the malleable satisfaction
+ // algorithm succeeds. Given that under IsSane() both satisfactions
+ // are identical, this implies that for such nodes, the non-malleable
+ // satisfaction will also match the expected policy.
+ bool satisfiable = node->IsSatisfiable([](const Node& node) -> bool {
+ switch (node.fragment) {
+ case Fragment::PK_K:
+ case Fragment::PK_H: {
+ auto it = TEST_DATA.dummy_sigs.find(node.keys[0]);
+ assert(it != TEST_DATA.dummy_sigs.end());
+ return it->second.second;
+ }
+ case Fragment::MULTI: {
+ size_t sats = 0;
+ for (const auto& key : node.keys) {
+ auto it = TEST_DATA.dummy_sigs.find(key);
+ assert(it != TEST_DATA.dummy_sigs.end());
+ sats += it->second.second;
+ }
+ return sats >= node.k;
+ }
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ return node.k & 1;
+ case Fragment::SHA256:
+ return TEST_DATA.sha256_preimages.count(node.data);
+ case Fragment::HASH256:
+ return TEST_DATA.hash256_preimages.count(node.data);
+ case Fragment::RIPEMD160:
+ return TEST_DATA.ripemd160_preimages.count(node.data);
+ case Fragment::HASH160:
+ return TEST_DATA.hash160_preimages.count(node.data);
+ default:
+ assert(false);
+ }
+ return false;
+ });
+ assert(mal_success == satisfiable);
+}
+
} // namespace
void FuzzInit()
@@ -138,6 +1053,33 @@ void FuzzInit()
TEST_DATA.Init();
}
+void FuzzInitSmart()
+{
+ FuzzInit();
+ SMARTINFO.Init();
+}
+
+/** Fuzz target that runs TestNode on nodes generated using ConsumeNodeStable. */
+FUZZ_TARGET_INIT(miniscript_stable, FuzzInit)
+{
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ TestNode(GenNode([&](Type needed_type) {
+ return ConsumeNodeStable(provider, needed_type);
+ }, ""_mst), provider);
+}
+
+/** Fuzz target that runs TestNode on nodes generated using ConsumeNodeSmart. */
+FUZZ_TARGET_INIT(miniscript_smart, FuzzInitSmart)
+{
+ /** The set of types we aim to construct nodes for. Together they cover all valid nodes (every node has exactly one of these base types). */
+ static constexpr std::array<Type, 4> BASE_TYPES{"B"_mst, "V"_mst, "K"_mst, "W"_mst};
+
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ TestNode(GenNode([&](Type needed_type) {
+ return ConsumeNodeSmart(provider, needed_type);
+ }, PickValue(provider, BASE_TYPES), true), provider);
+}
+
/* Fuzz tests that test parsing from a string, and roundtripping via string. */
FUZZ_TARGET_INIT(miniscript_string, FuzzInit)
{